5.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 15 Apr 2019 12:14:52 +0000 (14:14 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 15 Apr 2019 12:14:52 +0000 (14:14 +0200)
added patches:
block-do-not-leak-memory-in-bio_copy_user_iov.patch
block-fix-the-return-errno-for-direct-io.patch
block-revert-v5.0-blk_mq_request_issue_directly-changes.patch
btrfs-do-not-allow-trimming-when-a-fs-is-mounted-with-the-nologreplay-option.patch
btrfs-prop-fix-vanished-compression-property-after-failed-set.patch
btrfs-prop-fix-zstd-compression-parameter-validation.patch
genirq-initialize-request_mutex-if-config_sparse_irq-n.patch
genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch
riscv-fix-syscall_get_arguments-and-syscall_set_arguments.patch
virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch

queue-5.0/block-do-not-leak-memory-in-bio_copy_user_iov.patch [new file with mode: 0644]
queue-5.0/block-fix-the-return-errno-for-direct-io.patch [new file with mode: 0644]
queue-5.0/block-revert-v5.0-blk_mq_request_issue_directly-changes.patch [new file with mode: 0644]
queue-5.0/btrfs-do-not-allow-trimming-when-a-fs-is-mounted-with-the-nologreplay-option.patch [new file with mode: 0644]
queue-5.0/btrfs-prop-fix-vanished-compression-property-after-failed-set.patch [new file with mode: 0644]
queue-5.0/btrfs-prop-fix-zstd-compression-parameter-validation.patch [new file with mode: 0644]
queue-5.0/genirq-initialize-request_mutex-if-config_sparse_irq-n.patch [new file with mode: 0644]
queue-5.0/genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch [new file with mode: 0644]
queue-5.0/riscv-fix-syscall_get_arguments-and-syscall_set_arguments.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch [new file with mode: 0644]

diff --git a/queue-5.0/block-do-not-leak-memory-in-bio_copy_user_iov.patch b/queue-5.0/block-do-not-leak-memory-in-bio_copy_user_iov.patch
new file mode 100644 (file)
index 0000000..a4c0ad6
--- /dev/null
@@ -0,0 +1,42 @@
+From a3761c3c91209b58b6f33bf69dd8bb8ec0c9d925 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+Date: Wed, 10 Apr 2019 16:27:51 -0400
+Subject: block: do not leak memory in bio_copy_user_iov()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jérôme Glisse <jglisse@redhat.com>
+
+commit a3761c3c91209b58b6f33bf69dd8bb8ec0c9d925 upstream.
+
+When bio_add_pc_page() fails in bio_copy_user_iov() we should free
+the page we just allocated otherwise we are leaking it.
+
+Cc: linux-block@vger.kernel.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1238,8 +1238,11 @@ struct bio *bio_copy_user_iov(struct req
+                       }
+               }
+-              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++                      if (!map_data)
++                              __free_page(page);
+                       break;
++              }
+               len -= bytes;
+               offset = 0;
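
The fix above frees the page only when bio_copy_user_iov() allocated it itself (map_data is NULL); pages handed in by the caller via map_data are not freed there. A minimal user-space sketch of the same allocate-then-add pattern (hypothetical names, not the block-layer code):

  #include <stdio.h>
  #include <stdlib.h>

  #define CAPACITY 4

  /* Stands in for bio_add_pc_page(): fails once the container is full. */
  static int add_page(void **pages, int *count, void *page)
  {
      if (*count >= CAPACITY)
          return -1;
      pages[(*count)++] = page;
      return 0;
  }

  int main(void)
  {
      void *pages[CAPACITY];
      int count = 0;

      for (int i = 0; i < 8; i++) {
          void *page = malloc(4096);              /* like alloc_page() */
          if (!page)
              break;
          if (add_page(pages, &count, page) < 0) {
              free(page);                         /* the fix: free the page we just allocated */
              break;
          }
      }
      printf("added %d pages; the failed addition did not leak\n", count);
      while (count)
          free(pages[--count]);
      return 0;
  }
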
diff --git a/queue-5.0/block-fix-the-return-errno-for-direct-io.patch b/queue-5.0/block-fix-the-return-errno-for-direct-io.patch
new file mode 100644 (file)
index 0000000..1484c29
--- /dev/null
@@ -0,0 +1,55 @@
+From a89afe58f1a74aac768a5eb77af95ef4ee15beaa Mon Sep 17 00:00:00 2001
+From: Jason Yan <yanaijie@huawei.com>
+Date: Fri, 12 Apr 2019 10:09:16 +0800
+Subject: block: fix the return errno for direct IO
+
+From: Jason Yan <yanaijie@huawei.com>
+
+commit a89afe58f1a74aac768a5eb77af95ef4ee15beaa upstream.
+
+If the last bio returned is not dio->bio, the status of the bio will
+not be assigned to dio->bio if it is an error. This causes the whole
+IO status to be wrong.
+
+    ksoftirqd/21-117   [021] ..s.  4017.966090:   8,0    C   N 4883648 [0]
+          <idle>-0     [018] ..s.  4017.970888:   8,0    C  WS 4924800 + 1024 [0]
+          <idle>-0     [018] ..s.  4017.970909:   8,0    D  WS 4935424 + 1024 [<idle>]
+          <idle>-0     [018] ..s.  4017.970924:   8,0    D  WS 4936448 + 321 [<idle>]
+    ksoftirqd/21-117   [021] ..s.  4017.995033:   8,0    C   R 4883648 + 336 [65475]
+    ksoftirqd/21-117   [021] d.s.  4018.001988: myprobe1: (blkdev_bio_end_io+0x0/0x168) bi_status=7
+    ksoftirqd/21-117   [021] d.s.  4018.001992: myprobe: (aio_complete_rw+0x0/0x148) x0=0xffff802f2595ad80 res=0x12a000 res2=0x0
+
+We always have to assign bio->bi_status to dio->bio.bi_status because we
+will only check dio->bio.bi_status when we return the whole IO to
+the upper layer.
+
+Fixes: 542ff7bf18c6 ("block: new direct I/O implementation")
+Cc: stable@vger.kernel.org
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jens Axboe <axboe@kernel.dk>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jason Yan <yanaijie@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/block_dev.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -298,10 +298,10 @@ static void blkdev_bio_end_io(struct bio
+       struct blkdev_dio *dio = bio->bi_private;
+       bool should_dirty = dio->should_dirty;
+-      if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
+-              if (bio->bi_status && !dio->bio.bi_status)
+-                      dio->bio.bi_status = bio->bi_status;
+-      } else {
++      if (bio->bi_status && !dio->bio.bi_status)
++              dio->bio.bi_status = bio->bi_status;
++
++      if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
+               if (!dio->is_sync) {
+                       struct kiocb *iocb = dio->iocb;
+                       ssize_t ret;
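
After the change above, every completion latches the first error into dio->bio.bi_status before the "last completion" check, so an error carried by an intermediate bio is not lost. A user-space analogue of that latch-then-report-on-last pattern (hypothetical names, not the kernel code):

  #include <stdio.h>

  struct dio_state {
      int pending;    /* completions still outstanding */
      int status;     /* first error seen, 0 if none */
  };

  /* Called once per completed sub-request; 'status' is that sub-request's result. */
  static void complete_one(struct dio_state *dio, int status)
  {
      /* Latch the first error no matter which completion carries it ... */
      if (status && !dio->status)
          dio->status = status;

      /* ... and only report to the caller when the last completion arrives. */
      if (--dio->pending == 0)
          printf("whole I/O finished with status %d\n", dio->status);
  }

  int main(void)
  {
      struct dio_state dio = { .pending = 3, .status = 0 };

      complete_one(&dio, 0);
      complete_one(&dio, -5);   /* an error in the middle must not be lost */
      complete_one(&dio, 0);    /* last completion is fine, but status stays -5 */
      return 0;
  }
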
diff --git a/queue-5.0/block-revert-v5.0-blk_mq_request_issue_directly-changes.patch b/queue-5.0/block-revert-v5.0-blk_mq_request_issue_directly-changes.patch
new file mode 100644 (file)
index 0000000..5cfc21c
--- /dev/null
@@ -0,0 +1,280 @@
+From fd9c40f64c514bdc585a21e2e33fa5f83ca8811b Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Thu, 4 Apr 2019 10:08:43 -0700
+Subject: block: Revert v5.0 blk_mq_request_issue_directly() changes
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit fd9c40f64c514bdc585a21e2e33fa5f83ca8811b upstream.
+
+blk_mq_try_issue_directly() can return BLK_STS*_RESOURCE for requests that
+have been queued. If that happens when blk_mq_try_issue_directly() is called
+by the dm-mpath driver then dm-mpath will try to resubmit a request that is
+already queued and a kernel crash follows. Since it is nontrivial to fix
+blk_mq_request_issue_directly(), revert the blk_mq_request_issue_directly()
+changes that went into kernel v5.0.
+
+This patch reverts the following commits:
+* d6a51a97c0b2 ("blk-mq: replace and kill blk_mq_request_issue_directly") # v5.0.
+* 5b7a6f128aad ("blk-mq: issue directly with bypass 'false' in blk_mq_sched_insert_requests") # v5.0.
+* 7f556a44e61d ("blk-mq: refactor the code of issue request directly") # v5.0.
+
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Johannes Thumshirn <jthumshirn@suse.de>
+Cc: James Smart <james.smart@broadcom.com>
+Cc: Dongli Zhang <dongli.zhang@oracle.com>
+Cc: Laurence Oberman <loberman@redhat.com>
+Cc: <stable@vger.kernel.org>
+Reported-by: Laurence Oberman <loberman@redhat.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Fixes: 7f556a44e61d ("blk-mq: refactor the code of issue request directly") # v5.0.
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c     |    4 -
+ block/blk-mq-sched.c |    8 ++-
+ block/blk-mq.c       |  122 ++++++++++++++++++++++++++-------------------------
+ block/blk-mq.h       |    6 --
+ 4 files changed, 71 insertions(+), 69 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1246,8 +1246,6 @@ static int blk_cloned_rq_check_limits(st
+  */
+ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+ {
+-      blk_qc_t unused;
+-
+       if (blk_cloned_rq_check_limits(q, rq))
+               return BLK_STS_IOERR;
+@@ -1263,7 +1261,7 @@ blk_status_t blk_insert_cloned_request(s
+        * bypass a potential scheduler on the bottom device for
+        * insert.
+        */
+-      return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
++      return blk_mq_request_issue_directly(rq, true);
+ }
+ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct
+                * busy in case of 'none' scheduler, and this way may save
+                * us one extra enqueue & dequeue to sw queue.
+                */
+-              if (!hctx->dispatch_busy && !e && !run_queue_async)
++              if (!hctx->dispatch_busy && !e && !run_queue_async) {
+                       blk_mq_try_issue_list_directly(hctx, list);
+-              else
+-                      blk_mq_insert_requests(hctx, ctx, list);
++                      if (list_empty(list))
++                              return;
++              }
++              blk_mq_insert_requests(hctx, ctx, list);
+       }
+       blk_mq_run_hw_queue(hctx, run_queue_async);
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1805,74 +1805,76 @@ static blk_status_t __blk_mq_issue_direc
+       return ret;
+ }
+-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
++static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                               struct request *rq,
+                                               blk_qc_t *cookie,
+-                                              bool bypass, bool last)
++                                              bool bypass_insert, bool last)
+ {
+       struct request_queue *q = rq->q;
+       bool run_queue = true;
+-      blk_status_t ret = BLK_STS_RESOURCE;
+-      int srcu_idx;
+-      bool force = false;
+-      hctx_lock(hctx, &srcu_idx);
+       /*
+-       * hctx_lock is needed before checking quiesced flag.
++       * RCU or SRCU read lock is needed before checking quiesced flag.
+        *
+-       * When queue is stopped or quiesced, ignore 'bypass', insert
+-       * and return BLK_STS_OK to caller, and avoid driver to try to
+-       * dispatch again.
++       * When queue is stopped or quiesced, ignore 'bypass_insert' from
++       * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
++       * and avoid driver to try to dispatch again.
+        */
+-      if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
++      if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+               run_queue = false;
+-              bypass = false;
+-              goto out_unlock;
++              bypass_insert = false;
++              goto insert;
+       }
+-      if (unlikely(q->elevator && !bypass))
+-              goto out_unlock;
++      if (q->elevator && !bypass_insert)
++              goto insert;
+       if (!blk_mq_get_dispatch_budget(hctx))
+-              goto out_unlock;
++              goto insert;
+       if (!blk_mq_get_driver_tag(rq)) {
+               blk_mq_put_dispatch_budget(hctx);
+-              goto out_unlock;
++              goto insert;
+       }
+-      /*
+-       * Always add a request that has been through
+-       *.queue_rq() to the hardware dispatch list.
+-       */
+-      force = true;
+-      ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+-out_unlock:
++      return __blk_mq_issue_directly(hctx, rq, cookie, last);
++insert:
++      if (bypass_insert)
++              return BLK_STS_RESOURCE;
++
++      blk_mq_request_bypass_insert(rq, run_queue);
++      return BLK_STS_OK;
++}
++
++static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
++              struct request *rq, blk_qc_t *cookie)
++{
++      blk_status_t ret;
++      int srcu_idx;
++
++      might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
++
++      hctx_lock(hctx, &srcu_idx);
++
++      ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
++      if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
++              blk_mq_request_bypass_insert(rq, true);
++      else if (ret != BLK_STS_OK)
++              blk_mq_end_request(rq, ret);
++
++      hctx_unlock(hctx, srcu_idx);
++}
++
++blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
++{
++      blk_status_t ret;
++      int srcu_idx;
++      blk_qc_t unused_cookie;
++      struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
++
++      hctx_lock(hctx, &srcu_idx);
++      ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
+       hctx_unlock(hctx, srcu_idx);
+-      switch (ret) {
+-      case BLK_STS_OK:
+-              break;
+-      case BLK_STS_DEV_RESOURCE:
+-      case BLK_STS_RESOURCE:
+-              if (force) {
+-                      blk_mq_request_bypass_insert(rq, run_queue);
+-                      /*
+-                       * We have to return BLK_STS_OK for the DM
+-                       * to avoid livelock. Otherwise, we return
+-                       * the real result to indicate whether the
+-                       * request is direct-issued successfully.
+-                       */
+-                      ret = bypass ? BLK_STS_OK : ret;
+-              } else if (!bypass) {
+-                      blk_mq_sched_insert_request(rq, false,
+-                                                  run_queue, false);
+-              }
+-              break;
+-      default:
+-              if (!bypass)
+-                      blk_mq_end_request(rq, ret);
+-              break;
+-      }
+       return ret;
+ }
+@@ -1880,20 +1882,22 @@ out_unlock:
+ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+               struct list_head *list)
+ {
+-      blk_qc_t unused;
+-      blk_status_t ret = BLK_STS_OK;
+-
+       while (!list_empty(list)) {
++              blk_status_t ret;
+               struct request *rq = list_first_entry(list, struct request,
+                               queuelist);
+               list_del_init(&rq->queuelist);
+-              if (ret == BLK_STS_OK)
+-                      ret = blk_mq_try_issue_directly(hctx, rq, &unused,
+-                                                      false,
++              ret = blk_mq_request_issue_directly(rq, list_empty(list));
++              if (ret != BLK_STS_OK) {
++                      if (ret == BLK_STS_RESOURCE ||
++                                      ret == BLK_STS_DEV_RESOURCE) {
++                              blk_mq_request_bypass_insert(rq,
+                                                       list_empty(list));
+-              else
+-                      blk_mq_sched_insert_request(rq, false, true, false);
++                              break;
++                      }
++                      blk_mq_end_request(rq, ret);
++              }
+       }
+       /*
+@@ -1901,7 +1905,7 @@ void blk_mq_try_issue_list_directly(stru
+        * the driver there was more coming, but that turned out to
+        * be a lie.
+        */
+-      if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
++      if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
+               hctx->queue->mq_ops->commit_rqs(hctx);
+ }
+@@ -2014,13 +2018,13 @@ static blk_qc_t blk_mq_make_request(stru
+               if (same_queue_rq) {
+                       data.hctx = same_queue_rq->mq_hctx;
+                       blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+-                                      &cookie, false, true);
++                                      &cookie);
+               }
+       } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
+                       !data.hctx->dispatch_busy)) {
+               blk_mq_put_ctx(data.ctx);
+               blk_mq_bio_to_request(rq, bio);
+-              blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
++              blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+       } else {
+               blk_mq_put_ctx(data.ctx);
+               blk_mq_bio_to_request(rq, bio);
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -67,10 +67,8 @@ void blk_mq_request_bypass_insert(struct
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+                               struct list_head *list);
+-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+-                                              struct request *rq,
+-                                              blk_qc_t *cookie,
+-                                              bool bypass, bool last);
++/* Used by blk_insert_cloned_request() to issue request directly */
++blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
+ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+                                   struct list_head *list);
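
As described in the changelog above, the revert restores a contract in which a direct-issue failure is only reported when the request was not queued by the callee, so a caller such as dm-mpath may safely resubmit. A small user-space illustration of why "queue it and still report busy" breaks the caller (hypothetical code, not the block layer):

  #include <stdio.h>

  struct node { struct node *next; int id; };

  static void enqueue(struct node **head, struct node *n)
  {
      n->next = *head;
      *head = n;
  }

  /* A dispatcher that queues the request but still tells the caller "busy". */
  static int buggy_issue(struct node **queue, struct node *rq)
  {
      enqueue(queue, rq);
      return -1;    /* like returning BLK_STS_RESOURCE for an already-queued request */
  }

  int main(void)
  {
      struct node *queue = NULL;
      struct node rq = { .next = NULL, .id = 1 };

      if (buggy_issue(&queue, &rq) != 0)
          enqueue(&queue, &rq);   /* the caller requeues: rq is now on the list twice */

      /* The list now points at itself (rq.next == &rq), so traversal never ends. */
      printf("cycle created: %s\n", rq.next == &rq ? "yes" : "no");
      return 0;
  }
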
diff --git a/queue-5.0/btrfs-do-not-allow-trimming-when-a-fs-is-mounted-with-the-nologreplay-option.patch b/queue-5.0/btrfs-do-not-allow-trimming-when-a-fs-is-mounted-with-the-nologreplay-option.patch
new file mode 100644 (file)
index 0000000..1a0ec69
--- /dev/null
@@ -0,0 +1,53 @@
+From f35f06c35560a86e841631f0243b83a984dc11a9 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 26 Mar 2019 10:49:56 +0000
+Subject: Btrfs: do not allow trimming when a fs is mounted with the nologreplay option
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit f35f06c35560a86e841631f0243b83a984dc11a9 upstream.
+
+When a filesystem is mounted with the nologreplay mount option, which
+requires it to be mounted in RO mode as well, we can not allow discard on
+free space inside block groups, because log trees refer to extents that
+are not pinned in a block group's free space cache (pinning the extents is
+precisely the first phase of replaying a log tree).
+
+So do not allow the fitrim ioctl to do anything when the filesystem is
+mounted with the nologreplay option, because later it can be mounted RW
+without that option, which causes log replay to happen and result in
+either a failure to replay the log trees (leading to a mount failure), a
+crash or some silent corruption.
+
+Reported-by: Darrick J. Wong <darrick.wong@oracle.com>
+Fixes: 96da09192cda ("btrfs: Introduce new mount option to disable tree log replay")
+CC: stable@vger.kernel.org # 4.9+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ioctl.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(s
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
++      /*
++       * If the fs is mounted with nologreplay, which requires it to be
++       * mounted in RO mode as well, we can not allow discard on free space
++       * inside block groups, because log trees refer to extents that are not
++       * pinned in a block group's free space cache (pinning the extents is
++       * precisely the first phase of replaying a log tree).
++       */
++      if (btrfs_test_opt(fs_info, NOLOGREPLAY))
++              return -EROFS;
++
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+                               dev_list) {
diff --git a/queue-5.0/btrfs-prop-fix-vanished-compression-property-after-failed-set.patch b/queue-5.0/btrfs-prop-fix-vanished-compression-property-after-failed-set.patch
new file mode 100644 (file)
index 0000000..88cfb96
--- /dev/null
@@ -0,0 +1,53 @@
+From 272e5326c7837697882ce3162029ba893059b616 Mon Sep 17 00:00:00 2001
+From: Anand Jain <anand.jain@oracle.com>
+Date: Tue, 2 Apr 2019 18:07:40 +0800
+Subject: btrfs: prop: fix vanished compression property after failed set
+
+From: Anand Jain <anand.jain@oracle.com>
+
+commit 272e5326c7837697882ce3162029ba893059b616 upstream.
+
+The compression property resets to NULL, instead of the old value if we
+fail to set the new compression parameter.
+
+  $ btrfs prop get /btrfs compression
+    compression=lzo
+  $ btrfs prop set /btrfs compression zli
+    ERROR: failed to set compression for /btrfs: Invalid argument
+  $ btrfs prop get /btrfs compression
+
+This is because the compression property ->validate() is successful for
+'zli', as strncmp() used the length passed from userspace.
+
+Fix it by using the expected string length in strncmp().
+
+Fixes: 63541927c8d1 ("Btrfs: add support for inode properties")
+Fixes: 5c1aab1dd544 ("btrfs: Add zstd support")
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Anand Jain <anand.jain@oracle.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/props.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct bt
+ static int prop_compression_validate(const char *value, size_t len)
+ {
+-      if (!strncmp("lzo", value, len))
++      if (!strncmp("lzo", value, 3))
+               return 0;
+-      else if (!strncmp("zlib", value, len))
++      else if (!strncmp("zlib", value, 4))
+               return 0;
+-      else if (!strncmp("zstd", value, len))
++      else if (!strncmp("zstd", value, 4))
+               return 0;
+       return -EINVAL;
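
The underlying problem is that strncmp() with the caller-supplied length only compares a prefix, so a truncated value such as "zli" or "zst" compares equal. A standalone snippet showing the difference between comparing with the user's length and with the full keyword length (illustration only, not the btrfs code):

  #include <stdio.h>
  #include <string.h>

  /* Old behaviour: compare only as many bytes as the user sent. */
  static int validate_with_user_len(const char *value, size_t len)
  {
      return strncmp("zlib", value, len) == 0 || strncmp("zstd", value, len) == 0;
  }

  /* Changed behaviour: compare against the full expected keyword. */
  static int validate_with_keyword_len(const char *value)
  {
      return strncmp("zlib", value, 4) == 0 || strncmp("zstd", value, 4) == 0;
  }

  int main(void)
  {
      const char *bogus = "zst";    /* a truncated value a user might pass by mistake */

      printf("user length   : %s\n",
             validate_with_user_len(bogus, strlen(bogus)) ? "accepted" : "rejected");
      printf("keyword length: %s\n",
             validate_with_keyword_len(bogus) ? "accepted" : "rejected");
      return 0;
  }
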
diff --git a/queue-5.0/btrfs-prop-fix-zstd-compression-parameter-validation.patch b/queue-5.0/btrfs-prop-fix-zstd-compression-parameter-validation.patch
new file mode 100644 (file)
index 0000000..d842f9c
--- /dev/null
@@ -0,0 +1,43 @@
+From 50398fde997f6be8faebdb5f38e9c9c467370f51 Mon Sep 17 00:00:00 2001
+From: Anand Jain <anand.jain@oracle.com>
+Date: Tue, 2 Apr 2019 18:07:38 +0800
+Subject: btrfs: prop: fix zstd compression parameter validation
+
+From: Anand Jain <anand.jain@oracle.com>
+
+commit 50398fde997f6be8faebdb5f38e9c9c467370f51 upstream.
+
+We let the zstd compression parameter pass even if it is not fully valid.
+For example:
+
+  $ btrfs prop set /btrfs compression zst
+  $ btrfs prop get /btrfs compression
+     compression=zst
+
+zlib and lzo are fine.
+
+Fix it by checking the correct prefix length.
+
+Fixes: 5c1aab1dd544 ("btrfs: Add zstd support")
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Anand Jain <anand.jain@oracle.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/props.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -396,7 +396,7 @@ static int prop_compression_apply(struct
+               btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+       } else if (!strncmp("zlib", value, 4)) {
+               type = BTRFS_COMPRESS_ZLIB;
+-      } else if (!strncmp("zstd", value, len)) {
++      } else if (!strncmp("zstd", value, 4)) {
+               type = BTRFS_COMPRESS_ZSTD;
+               btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+       } else {
diff --git a/queue-5.0/genirq-initialize-request_mutex-if-config_sparse_irq-n.patch b/queue-5.0/genirq-initialize-request_mutex-if-config_sparse_irq-n.patch
new file mode 100644 (file)
index 0000000..4995c5e
--- /dev/null
@@ -0,0 +1,36 @@
+From e8458e7afa855317b14915d7b86ab3caceea7eb6 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Thu, 4 Apr 2019 15:45:12 +0800
+Subject: genirq: Initialize request_mutex if CONFIG_SPARSE_IRQ=n
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit e8458e7afa855317b14915d7b86ab3caceea7eb6 upstream.
+
+When CONFIG_SPARSE_IRQ is disabled, the request_mutex in struct irq_desc
+is not initialized, which causes malfunction.
+
+Fixes: 9114014cf4e6 ("genirq: Add mutex to irq desc to serialize request/free_irq()")
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Mukesh Ojha <mojha@codeaurora.org>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: <linux-arm-kernel@lists.infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190404074512.145533-1-wangkefeng.wang@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/irq/irqdesc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -558,6 +558,7 @@ int __init early_irq_init(void)
+               alloc_masks(&desc[i], node);
+               raw_spin_lock_init(&desc[i].lock);
+               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
++              mutex_init(&desc[i].request_mutex);
+               desc_set_defaults(i, &desc[i], node, NULL, NULL);
+       }
+       return arch_early_irq_init();
diff --git a/queue-5.0/genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch b/queue-5.0/genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch
new file mode 100644 (file)
index 0000000..a425bb9
--- /dev/null
@@ -0,0 +1,69 @@
+From 325aa19598e410672175ed50982f902d4e3f31c5 Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <swboyd@chromium.org>
+Date: Mon, 25 Mar 2019 11:10:26 -0700
+Subject: genirq: Respect IRQCHIP_SKIP_SET_WAKE in irq_chip_set_wake_parent()
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+commit 325aa19598e410672175ed50982f902d4e3f31c5 upstream.
+
+If a child irqchip calls irq_chip_set_wake_parent() but its parent irqchip
+has the IRQCHIP_SKIP_SET_WAKE flag set, an error is returned.
+
+This is inconsistent behaviour vs. set_irq_wake_real() which returns 0 when
+the irqchip has the IRQCHIP_SKIP_SET_WAKE flag set. It doesn't attempt to
+walk the chain of parents and set irq wake on any chips that don't have the
+flag set either. If the intent is to call the .irq_set_wake() callback of
+the parent irqchip, then we expect irqchip implementations to omit the
+IRQCHIP_SKIP_SET_WAKE flag and implement an .irq_set_wake() function that
+calls irq_chip_set_wake_parent().
+
+The problem has been observed on a Qualcomm sdm845 device where set wake
+fails on any GPIO interrupts after applying work in progress wakeup irq
+patches to the GPIO driver. The chain of chips looks like this:
+
+     QCOM GPIO -> QCOM PDC (SKIP) -> ARM GIC (SKIP)
+
+The GPIO controller's parent is the QCOM PDC irqchip, which in turn has ARM
+GIC as parent.  The QCOM PDC irqchip has the IRQCHIP_SKIP_SET_WAKE flag
+set, and so does the grandparent ARM GIC.
+
+The GPIO driver doesn't know if the parent needs to set wake or not, so it
+unconditionally calls irq_chip_set_wake_parent() causing this function to
+return a failure because the parent irqchip (PDC) doesn't have the
+.irq_set_wake() callback set. Returning 0 instead makes everything work and
+irqs from the GPIO controller can be configured for wakeup.
+
+Make it consistent by returning 0 (success) from irq_chip_set_wake_parent()
+when a parent chip has IRQCHIP_SKIP_SET_WAKE set.
+
+[ tglx: Massaged changelog ]
+
+Fixes: 08b55e2a9208e ("genirq: Add irqchip_set_wake_parent")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-gpio@vger.kernel.org
+Cc: Lina Iyer <ilina@codeaurora.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190325181026.247796-1-swboyd@chromium.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/irq/chip.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(st
+ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
+ {
+       data = data->parent_data;
++
++      if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
++              return 0;
++
+       if (data->chip->irq_set_wake)
+               return data->chip->irq_set_wake(data, on);
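
With the added check, a parent irqchip that sets IRQCHIP_SKIP_SET_WAKE is treated as "nothing to do" rather than as a failure, matching what set_irq_wake_real() already does. A compact user-space analogue of that decision (hypothetical structures, not the genirq code):

  #include <stdio.h>

  #define SKIP_SET_WAKE 0x1

  struct chip {
      unsigned int flags;
      int (*set_wake)(int on);    /* may be NULL, as on the QCOM PDC */
  };

  struct irq_node {
      struct irq_node *parent;
      struct chip *chip;
  };

  static int set_wake_parent(struct irq_node *d, int on)
  {
      d = d->parent;

      /* The fix: a parent that opted out of set_wake is not an error. */
      if (d->chip->flags & SKIP_SET_WAKE)
          return 0;

      if (d->chip->set_wake)
          return d->chip->set_wake(on);

      return -38;    /* stands in for -ENOSYS, the failure the GPIO driver used to see */
  }

  int main(void)
  {
      struct chip pdc = { .flags = SKIP_SET_WAKE, .set_wake = NULL };
      struct chip gpio = { .flags = 0, .set_wake = NULL };
      struct irq_node parent = { .parent = NULL, .chip = &pdc };
      struct irq_node child = { .parent = &parent, .chip = &gpio };

      printf("set_wake_parent() -> %d\n", set_wake_parent(&child, 1));
      return 0;
  }
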
diff --git a/queue-5.0/riscv-fix-syscall_get_arguments-and-syscall_set_arguments.patch b/queue-5.0/riscv-fix-syscall_get_arguments-and-syscall_set_arguments.patch
new file mode 100644 (file)
index 0000000..1f36c13
--- /dev/null
@@ -0,0 +1,67 @@
+From 10a16997db3d99fc02c026cf2c6e6c670acafab0 Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+Date: Fri, 29 Mar 2019 20:12:21 +0300
+Subject: riscv: Fix syscall_get_arguments() and syscall_set_arguments()
+
+From: Dmitry V. Levin <ldv@altlinux.org>
+
+commit 10a16997db3d99fc02c026cf2c6e6c670acafab0 upstream.
+
+RISC-V syscall arguments are located in orig_a0,a1..a5 fields
+of struct pt_regs.
+
+Due to an off-by-one bug and a bug in pointer arithmetic
+syscall_get_arguments() was reading s3..s7 fields instead of a1..a5.
+Likewise, syscall_set_arguments() was writing s3..s7 fields
+instead of a1..a5.
+
+Link: http://lkml.kernel.org/r/20190329171221.GA32456@altlinux.org
+
+Fixes: e2c0cdfba7f69 ("RISC-V: User-facing API")
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Will Drewry <wad@chromium.org>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: linux-riscv@lists.infradead.org
+Cc: stable@vger.kernel.org # v4.15+
+Acked-by: Palmer Dabbelt <palmer@sifive.com>
+Signed-off-by: Dmitry V. Levin <ldv@altlinux.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/riscv/include/asm/syscall.h |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/arch/riscv/include/asm/syscall.h
++++ b/arch/riscv/include/asm/syscall.h
+@@ -79,10 +79,11 @@ static inline void syscall_get_arguments
+       if (i == 0) {
+               args[0] = regs->orig_a0;
+               args++;
+-              i++;
+               n--;
++      } else {
++              i--;
+       }
+-      memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
++      memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+ }
+ static inline void syscall_set_arguments(struct task_struct *task,
+@@ -94,10 +95,11 @@ static inline void syscall_set_arguments
+         if (i == 0) {
+                 regs->orig_a0 = args[0];
+                 args++;
+-                i++;
+                 n--;
+-        }
+-      memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
++      } else {
++              i--;
++      }
++      memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
+ }
+ static inline int syscall_get_arch(void)
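
Per the changelog above, the index into a1..a5 ended up one too high (the off-by-one) and was additionally multiplied by sizeof(regs->a1) even though &regs->a1 is already a pointer to unsigned long, so each step advanced by eight elements on a 64-bit target and landed in the s-register area of struct pt_regs. The pointer-arithmetic half can be reproduced in plain user-space C:

  #include <stdio.h>

  int main(void)
  {
      /* Stand-in for the a1..a5 region of struct pt_regs, followed by other fields. */
      unsigned long regs[16];
      for (int i = 0; i < 16; i++)
          regs[i] = i;

      unsigned long *a1 = &regs[0];
      int i = 1;

      /* Pointer arithmetic already scales by the element size, so the extra   */
      /* sizeof() multiplies the offset by 8 on a 64-bit ABI.                   */
      unsigned long *wrong = a1 + i * sizeof(*a1);    /* a1 + 8 elements */
      unsigned long *right = a1 + i;                  /* a1 + 1 element  */

      printf("wrong reads element %lu, right reads element %lu\n", *wrong, *right);
      return 0;
  }
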
diff --git a/queue-5.0/series b/queue-5.0/series
index 8bc8e4fca37924111eb3df83fc8d61e55b55434f..b762c73a56023f35d9db28e5f882605a46475941 100644 (file)
@@ -71,3 +71,13 @@ include-linux-bitrev.h-fix-constant-bitrev.patch
 mm-writeback-use-exact-memcg-dirty-counts.patch
 asoc-intel-fix-crash-at-suspend-resume-after-failed-codec-registration.patch
 asoc-fsl_esai-fix-channel-swap-issue-when-stream-starts.patch
+btrfs-do-not-allow-trimming-when-a-fs-is-mounted-with-the-nologreplay-option.patch
+btrfs-prop-fix-zstd-compression-parameter-validation.patch
+btrfs-prop-fix-vanished-compression-property-after-failed-set.patch
+riscv-fix-syscall_get_arguments-and-syscall_set_arguments.patch
+block-revert-v5.0-blk_mq_request_issue_directly-changes.patch
+block-do-not-leak-memory-in-bio_copy_user_iov.patch
+block-fix-the-return-errno-for-direct-io.patch
+genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch
+genirq-initialize-request_mutex-if-config_sparse_irq-n.patch
+virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch
diff --git a/queue-5.0/virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch b/queue-5.0/virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch
new file mode 100644 (file)
index 0000000..aeb0291
--- /dev/null
@@ -0,0 +1,60 @@
+From cf94db21905333e610e479688add629397a4b384 Mon Sep 17 00:00:00 2001
+From: Cornelia Huck <cohuck@redhat.com>
+Date: Mon, 8 Apr 2019 14:33:22 +0200
+Subject: virtio: Honour 'may_reduce_num' in vring_create_virtqueue
+
+From: Cornelia Huck <cohuck@redhat.com>
+
+commit cf94db21905333e610e479688add629397a4b384 upstream.
+
+vring_create_virtqueue() allows the caller to specify via the
+may_reduce_num parameter whether the vring code is allowed to
+allocate a smaller ring than specified.
+
+However, the split ring allocation code tries to allocate a
+smaller ring on allocation failure regardless of what the
+caller specified. This may cause trouble for e.g. virtio-pci
+in legacy mode, which does not support ring resizing. (The
+packed ring code does not resize in any case.)
+
+Let's fix this by bailing out immediately in the split ring code
+if the requested size cannot be allocated and may_reduce_num has
+not been specified.
+
+While at it, fix a typo in the usage instructions.
+
+Fixes: 2a2d1382fe9d ("virtio: Add improved queue allocation API")
+Cc: stable@vger.kernel.org # v4.6+
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Jens Freimann <jfreimann@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_ring.c |    2 ++
+ include/linux/virtio_ring.h  |    2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -871,6 +871,8 @@ static struct virtqueue *vring_create_vi
+                                         GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+               if (queue)
+                       break;
++              if (!may_reduce_num)
++                      return NULL;
+       }
+       if (!num)
+--- a/include/linux/virtio_ring.h
++++ b/include/linux/virtio_ring.h
+@@ -63,7 +63,7 @@ struct virtqueue;
+ /*
+  * Creates a virtqueue and allocates the descriptor ring.  If
+  * may_reduce_num is set, then this may allocate a smaller ring than
+- * expected.  The caller should query virtqueue_get_ring_size to learn
++ * expected.  The caller should query virtqueue_get_vring_size to learn
+  * the actual size of the ring.
+  */
+ struct virtqueue *vring_create_virtqueue(unsigned int index,