git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.8-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Aug 2020 09:50:12 +0000 (11:50 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Aug 2020 09:50:12 +0000 (11:50 +0200)
added patches:
blk-mq-order-adding-requests-to-hctx-dispatch-and-checking-sched_restart.patch
block-fix-get_max_io_size.patch
block-loop-set-discard-granularity-and-alignment-for-block-device-backed-loop.patch
btrfs-check-the-right-error-variable-in-btrfs_del_dir_entries_in_log.patch
btrfs-detect-nocow-for-swap-after-snapshot-delete.patch
btrfs-fix-space-cache-memory-leak-after-transaction-abort.patch
btrfs-reset-compression-level-for-lzo-on-remount.patch
fbcon-prevent-user-font-height-or-width-change-from-causing-potential-out-of-bounds-access.patch
hid-i2c-hid-always-sleep-60ms-after-i2c_hid_pwr_on-commands.patch
hwmon-gsc-hwmon-scale-temperature-to-millidegrees.patch

queue-5.8/blk-mq-order-adding-requests-to-hctx-dispatch-and-checking-sched_restart.patch [new file with mode: 0644]
queue-5.8/block-fix-get_max_io_size.patch [new file with mode: 0644]
queue-5.8/block-loop-set-discard-granularity-and-alignment-for-block-device-backed-loop.patch [new file with mode: 0644]
queue-5.8/btrfs-check-the-right-error-variable-in-btrfs_del_dir_entries_in_log.patch [new file with mode: 0644]
queue-5.8/btrfs-detect-nocow-for-swap-after-snapshot-delete.patch [new file with mode: 0644]
queue-5.8/btrfs-fix-space-cache-memory-leak-after-transaction-abort.patch [new file with mode: 0644]
queue-5.8/btrfs-reset-compression-level-for-lzo-on-remount.patch [new file with mode: 0644]
queue-5.8/fbcon-prevent-user-font-height-or-width-change-from-causing-potential-out-of-bounds-access.patch [new file with mode: 0644]
queue-5.8/hid-i2c-hid-always-sleep-60ms-after-i2c_hid_pwr_on-commands.patch [new file with mode: 0644]
queue-5.8/hwmon-gsc-hwmon-scale-temperature-to-millidegrees.patch [new file with mode: 0644]
queue-5.8/series

diff --git a/queue-5.8/blk-mq-order-adding-requests-to-hctx-dispatch-and-checking-sched_restart.patch b/queue-5.8/blk-mq-order-adding-requests-to-hctx-dispatch-and-checking-sched_restart.patch
new file mode 100644 (file)
index 0000000..bfb6478
--- /dev/null
@@ -0,0 +1,92 @@
+From d7d8535f377e9ba87edbf7fbbd634ac942f3f54f Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 17 Aug 2020 18:01:15 +0800
+Subject: blk-mq: order adding requests to hctx->dispatch and checking SCHED_RESTART
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit d7d8535f377e9ba87edbf7fbbd634ac942f3f54f upstream.
+
+SCHED_RESTART code path is relied to re-run queue for dispatch requests
+in hctx->dispatch. Meantime the SCHED_RESTART flag is checked when adding
+requests to hctx->dispatch.
+
+memory barriers have to be used for ordering the following two pair of OPs:
+
+1) adding requests to hctx->dispatch and checking SCHED_RESTART in
+blk_mq_dispatch_rq_list()
+
+2) clearing SCHED_RESTART and checking if there is request in hctx->dispatch
+in blk_mq_sched_restart().
+
+Without the added memory barrier, either:
+
+1) blk_mq_sched_restart() may miss requests added to hctx->dispatch meantime
+blk_mq_dispatch_rq_list() observes SCHED_RESTART, and not run queue in
+dispatch side
+
+or
+
+2) blk_mq_dispatch_rq_list still sees SCHED_RESTART, and not run queue
+in dispatch side, meantime checking if there is request in
+hctx->dispatch from blk_mq_sched_restart() is missed.
+
+IO hang in ltp/fs_fill test is reported by kernel test robot:
+
+       https://lkml.org/lkml/2020/7/26/77
+
+Turns out it is caused by the above out-of-order OPs. And the IO hang
+can't be observed any more after applying this patch.
+
+Fixes: bd166ef183c2 ("blk-mq-sched: add framework for MQ capable IO schedulers")
+Reported-by: kernel test robot <rong.a.chen@intel.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: David Jeffery <djeffery@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq-sched.c |    9 +++++++++
+ block/blk-mq.c       |    9 +++++++++
+ 2 files changed, 18 insertions(+)
+
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_
+               return;
+       clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
++      /*
++       * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
++       * in blk_mq_run_hw_queue(). Its pair is the barrier in
++       * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
++       * meantime new request added to hctx->dispatch is missed to check in
++       * blk_mq_run_hw_queue().
++       */
++      smp_mb();
++
+       blk_mq_run_hw_queue(hctx, true);
+ }
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1324,6 +1324,15 @@ bool blk_mq_dispatch_rq_list(struct requ
+               spin_unlock(&hctx->lock);
+               /*
++               * Order adding requests to hctx->dispatch and checking
++               * SCHED_RESTART flag. The pair of this smp_mb() is the one
++               * in blk_mq_sched_restart(). Avoid restart code path to
++               * miss the new added requests to hctx->dispatch, meantime
++               * SCHED_RESTART is observed here.
++               */
++              smp_mb();
++
++              /*
+                * If SCHED_RESTART was set by the caller of this function and
+                * it is no longer set that means that it was cleared by another
+                * thread and hence that a queue rerun is needed.
diff --git a/queue-5.8/block-fix-get_max_io_size.patch b/queue-5.8/block-fix-get_max_io_size.patch
new file mode 100644 (file)
index 0000000..af4e2f9
--- /dev/null
@@ -0,0 +1,37 @@
+From e4b469c66f3cbb81c2e94d31123d7bcdf3c1dabd Mon Sep 17 00:00:00 2001
+From: Keith Busch <kbusch@kernel.org>
+Date: Thu, 6 Aug 2020 14:58:37 -0700
+Subject: block: fix get_max_io_size()
+
+From: Keith Busch <kbusch@kernel.org>
+
+commit e4b469c66f3cbb81c2e94d31123d7bcdf3c1dabd upstream.
+
+A previous commit aligning splits to physical block sizes inadvertently
+modified one return case such that that it now returns 0 length splits
+when the number of sectors doesn't exceed the physical offset. This
+later hits a BUG in bio_split(). Restore the previous working behavior.
+
+Fixes: 9cc5169cd478b ("block: Improve physical block alignment of split bios")
+Reported-by: Eric Deal <eric.deal@wdc.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-merge.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(s
+       if (max_sectors > start_offset)
+               return max_sectors - start_offset;
+-      return sectors & (lbs - 1);
++      return sectors & ~(lbs - 1);
+ }
+ static inline unsigned get_max_segment_size(const struct request_queue *q,
diff --git a/queue-5.8/block-loop-set-discard-granularity-and-alignment-for-block-device-backed-loop.patch b/queue-5.8/block-loop-set-discard-granularity-and-alignment-for-block-device-backed-loop.patch
new file mode 100644 (file)
index 0000000..a5f6c3b
--- /dev/null
@@ -0,0 +1,108 @@
+From bcb21c8cc9947286211327d663ace69f07d37a76 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 17 Aug 2020 18:01:30 +0800
+Subject: block: loop: set discard granularity and alignment for block device backed loop
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit bcb21c8cc9947286211327d663ace69f07d37a76 upstream.
+
+In case of block device backend, if the backend supports write zeros, the
+loop device will set queue flag of QUEUE_FLAG_DISCARD. However,
+limits.discard_granularity isn't setup, and this way is wrong,
+see the following description in Documentation/ABI/testing/sysfs-block:
+
+       A discard_granularity of 0 means that the device does not support
+       discard functionality.
+
+Especially 9b15d109a6b2 ("block: improve discard bio alignment in
+__blkdev_issue_discard()") starts to take q->limits.discard_granularity
+for computing max discard sectors. And zero discard granularity may cause
+kernel oops, or fail discard request even though the loop queue claims
+discard support via QUEUE_FLAG_DISCARD.
+
+Fix the issue by setting up discard granularity and alignment.
+
+Fixes: c52abf563049 ("loop: Better discard support for block devices")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Coly Li <colyli@suse.de>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Xiao Ni <xni@redhat.com>
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: Evan Green <evgreen@chromium.org>
+Cc: Gwendal Grignou <gwendal@chromium.org>
+Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Cc: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c |   33 ++++++++++++++++++---------------
+ 1 file changed, 18 insertions(+), 15 deletions(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -877,6 +877,7 @@ static void loop_config_discard(struct l
+       struct file *file = lo->lo_backing_file;
+       struct inode *inode = file->f_mapping->host;
+       struct request_queue *q = lo->lo_queue;
++      u32 granularity, max_discard_sectors;
+       /*
+        * If the backing device is a block device, mirror its zeroing
+@@ -889,11 +890,10 @@ static void loop_config_discard(struct l
+               struct request_queue *backingq;
+               backingq = bdev_get_queue(inode->i_bdev);
+-              blk_queue_max_discard_sectors(q,
+-                      backingq->limits.max_write_zeroes_sectors);
+-              blk_queue_max_write_zeroes_sectors(q,
+-                      backingq->limits.max_write_zeroes_sectors);
++              max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
++              granularity = backingq->limits.discard_granularity ?:
++                      queue_physical_block_size(backingq);
+       /*
+        * We use punch hole to reclaim the free space used by the
+@@ -902,23 +902,26 @@ static void loop_config_discard(struct l
+        * useful information.
+        */
+       } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+-              q->limits.discard_granularity = 0;
+-              q->limits.discard_alignment = 0;
+-              blk_queue_max_discard_sectors(q, 0);
+-              blk_queue_max_write_zeroes_sectors(q, 0);
++              max_discard_sectors = 0;
++              granularity = 0;
+       } else {
+-              q->limits.discard_granularity = inode->i_sb->s_blocksize;
+-              q->limits.discard_alignment = 0;
+-
+-              blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+-              blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
++              max_discard_sectors = UINT_MAX >> 9;
++              granularity = inode->i_sb->s_blocksize;
+       }
+-      if (q->limits.max_write_zeroes_sectors)
++      if (max_discard_sectors) {
++              q->limits.discard_granularity = granularity;
++              blk_queue_max_discard_sectors(q, max_discard_sectors);
++              blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+-      else
++      } else {
++              q->limits.discard_granularity = 0;
++              blk_queue_max_discard_sectors(q, 0);
++              blk_queue_max_write_zeroes_sectors(q, 0);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
++      }
++      q->limits.discard_alignment = 0;
+ }
+ static void loop_unprepare_queue(struct loop_device *lo)
diff --git a/queue-5.8/btrfs-check-the-right-error-variable-in-btrfs_del_dir_entries_in_log.patch b/queue-5.8/btrfs-check-the-right-error-variable-in-btrfs_del_dir_entries_in_log.patch
new file mode 100644 (file)
index 0000000..f5f85c0
--- /dev/null
@@ -0,0 +1,55 @@
+From fb2fecbad50964b9f27a3b182e74e437b40753ef Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 10 Aug 2020 17:31:16 -0400
+Subject: btrfs: check the right error variable in btrfs_del_dir_entries_in_log
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit fb2fecbad50964b9f27a3b182e74e437b40753ef upstream.
+
+With my new locking code dbench is so much faster that I tripped over a
+transaction abort from ENOSPC.  This turned out to be because
+btrfs_del_dir_entries_in_log was checking for ret == -ENOSPC, but this
+function sets err on error, and returns err.  So instead of properly
+marking the inode as needing a full commit, we were returning -ENOSPC
+and aborting in __btrfs_unlink_inode.  Fix this by checking the proper
+variable so that we return the correct thing in the case of ENOSPC.
+
+The ENOENT needs to be checked, because btrfs_lookup_dir_item_index()
+can return -ENOENT if the dir item isn't in the tree log (which would
+happen if we hadn't fsync'ed this guy).  We actually handle that case in
+__btrfs_unlink_inode, so it's an expected error to get back.
+
+Fixes: 4a500fd178c8 ("Btrfs: Metadata ENOSPC handling for tree log")
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+[ add note and comment about ENOENT ]
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3449,11 +3449,13 @@ fail:
+       btrfs_free_path(path);
+ out_unlock:
+       mutex_unlock(&dir->log_mutex);
+-      if (ret == -ENOSPC) {
++      if (err == -ENOSPC) {
+               btrfs_set_log_full_commit(trans);
+-              ret = 0;
+-      } else if (ret < 0)
+-              btrfs_abort_transaction(trans, ret);
++              err = 0;
++      } else if (err < 0 && err != -ENOENT) {
++              /* ENOENT can be returned if the entry hasn't been fsynced yet */
++              btrfs_abort_transaction(trans, err);
++      }
+       btrfs_end_log_trans(root);
diff --git a/queue-5.8/btrfs-detect-nocow-for-swap-after-snapshot-delete.patch b/queue-5.8/btrfs-detect-nocow-for-swap-after-snapshot-delete.patch
new file mode 100644 (file)
index 0000000..41d33e8
--- /dev/null
@@ -0,0 +1,181 @@
+From a84d5d429f9eb56f81b388609841ed993f0ddfca Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Tue, 18 Aug 2020 11:00:05 -0700
+Subject: btrfs: detect nocow for swap after snapshot delete
+
+From: Boris Burkov <boris@bur.io>
+
+commit a84d5d429f9eb56f81b388609841ed993f0ddfca upstream.
+
+can_nocow_extent and btrfs_cross_ref_exist both rely on a heuristic for
+detecting a must cow condition which is not exactly accurate, but saves
+unnecessary tree traversal. The incorrect assumption is that if the
+extent was created in a generation smaller than the last snapshot
+generation, it must be referenced by that snapshot. That is true, except
+the snapshot could have since been deleted, without affecting the last
+snapshot generation.
+
+The original patch claimed a performance win from this check, but it
+also leads to a bug where you are unable to use a swapfile if you ever
+snapshotted the subvolume it's in. Make the check slower and more strict
+for the swapon case, without modifying the general cow checks as a
+compromise. Turning swap on does not seem to be a particularly
+performance sensitive operation, so incurring a possibly unnecessary
+btrfs_search_slot seems worthwhile for the added usability.
+
+Note: Until the snapshot is completely cleaned after deletion,
+check_committed_refs will still cause the logic to think that cow is
+necessary, so the user must wait until 'btrfs subvolume sync' has finished
+before activating the swapfile with swapon.
+
+CC: stable@vger.kernel.org # 5.4+
+Suggested-by: Omar Sandoval <osandov@osandov.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ctree.h       |    4 ++--
+ fs/btrfs/extent-tree.c |   17 +++++++++++------
+ fs/btrfs/file.c        |    2 +-
+ fs/btrfs/inode.c       |   16 +++++++++-------
+ 4 files changed, 23 insertions(+), 16 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2468,7 +2468,7 @@ int btrfs_pin_extent_for_log_replay(stru
+                                   u64 bytenr, u64 num_bytes);
+ int btrfs_exclude_logged_extents(struct extent_buffer *eb);
+ int btrfs_cross_ref_exist(struct btrfs_root *root,
+-                        u64 objectid, u64 offset, u64 bytenr);
++                        u64 objectid, u64 offset, u64 bytenr, bool strict);
+ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+                                            struct btrfs_root *root,
+                                            u64 parent, u64 root_objectid,
+@@ -2854,7 +2854,7 @@ struct extent_map *btrfs_get_extent_fiem
+                                          u64 start, u64 len);
+ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+                             u64 *orig_start, u64 *orig_block_len,
+-                            u64 *ram_bytes);
++                            u64 *ram_bytes, bool strict);
+ void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+                               struct btrfs_inode *inode);
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2306,7 +2306,8 @@ static noinline int check_delayed_ref(st
+ static noinline int check_committed_ref(struct btrfs_root *root,
+                                       struct btrfs_path *path,
+-                                      u64 objectid, u64 offset, u64 bytenr)
++                                      u64 objectid, u64 offset, u64 bytenr,
++                                      bool strict)
+ {
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_root *extent_root = fs_info->extent_root;
+@@ -2348,9 +2349,13 @@ static noinline int check_committed_ref(
+           btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
+               goto out;
+-      /* If extent created before last snapshot => it's definitely shared */
+-      if (btrfs_extent_generation(leaf, ei) <=
+-          btrfs_root_last_snapshot(&root->root_item))
++      /*
++       * If extent created before last snapshot => it's shared unless the
++       * snapshot has been deleted. Use the heuristic if strict is false.
++       */
++      if (!strict &&
++          (btrfs_extent_generation(leaf, ei) <=
++           btrfs_root_last_snapshot(&root->root_item)))
+               goto out;
+       iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+@@ -2375,7 +2380,7 @@ out:
+ }
+ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
+-                        u64 bytenr)
++                        u64 bytenr, bool strict)
+ {
+       struct btrfs_path *path;
+       int ret;
+@@ -2386,7 +2391,7 @@ int btrfs_cross_ref_exist(struct btrfs_r
+       do {
+               ret = check_committed_ref(root, path, objectid,
+-                                        offset, bytenr);
++                                        offset, bytenr, strict);
+               if (ret && ret != -ENOENT)
+                       goto out;
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1568,7 +1568,7 @@ int btrfs_check_can_nocow(struct btrfs_i
+       }
+       ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+-                      NULL, NULL, NULL);
++                      NULL, NULL, NULL, false);
+       if (ret <= 0) {
+               ret = 0;
+               if (!nowait)
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1611,7 +1611,7 @@ next_slot:
+                               goto out_check;
+                       ret = btrfs_cross_ref_exist(root, ino,
+                                                   found_key.offset -
+-                                                  extent_offset, disk_bytenr);
++                                                  extent_offset, disk_bytenr, false);
+                       if (ret) {
+                               /*
+                                * ret could be -EIO if the above fails to read
+@@ -6957,7 +6957,7 @@ static struct extent_map *btrfs_new_exte
+  */
+ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+                             u64 *orig_start, u64 *orig_block_len,
+-                            u64 *ram_bytes)
++                            u64 *ram_bytes, bool strict)
+ {
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+       struct btrfs_path *path;
+@@ -7035,8 +7035,9 @@ noinline int can_nocow_extent(struct ino
+        * Do the same check as in btrfs_cross_ref_exist but without the
+        * unnecessary search.
+        */
+-      if (btrfs_file_extent_generation(leaf, fi) <=
+-          btrfs_root_last_snapshot(&root->root_item))
++      if (!strict &&
++          (btrfs_file_extent_generation(leaf, fi) <=
++           btrfs_root_last_snapshot(&root->root_item)))
+               goto out;
+       backref_offset = btrfs_file_extent_offset(leaf, fi);
+@@ -7072,7 +7073,8 @@ noinline int can_nocow_extent(struct ino
+        */
+       ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
+-                                  key.offset - backref_offset, disk_bytenr);
++                                  key.offset - backref_offset, disk_bytenr,
++                                  strict);
+       if (ret) {
+               ret = 0;
+               goto out;
+@@ -7293,7 +7295,7 @@ static int btrfs_get_blocks_direct_write
+               block_start = em->block_start + (start - em->start);
+               if (can_nocow_extent(inode, start, &len, &orig_start,
+-                                   &orig_block_len, &ram_bytes) == 1 &&
++                                   &orig_block_len, &ram_bytes, false) == 1 &&
+                   btrfs_inc_nocow_writers(fs_info, block_start)) {
+                       struct extent_map *em2;
+@@ -10103,7 +10105,7 @@ static int btrfs_swap_activate(struct sw
+               free_extent_map(em);
+               em = NULL;
+-              ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
++              ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
+               if (ret < 0) {
+                       goto out;
+               } else if (ret) {
diff --git a/queue-5.8/btrfs-fix-space-cache-memory-leak-after-transaction-abort.patch b/queue-5.8/btrfs-fix-space-cache-memory-leak-after-transaction-abort.patch
new file mode 100644 (file)
index 0000000..cceeb7e
--- /dev/null
@@ -0,0 +1,125 @@
+From bbc37d6e475eee8ffa2156ec813efc6bbb43c06d Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 14 Aug 2020 11:04:09 +0100
+Subject: btrfs: fix space cache memory leak after transaction abort
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit bbc37d6e475eee8ffa2156ec813efc6bbb43c06d upstream.
+
+If a transaction aborts it can cause a memory leak of the pages array of
+a block group's io_ctl structure. The following steps explain how that can
+happen:
+
+1) Transaction N is committing, currently in state TRANS_STATE_UNBLOCKED
+   and it's about to start writing out dirty extent buffers;
+
+2) Transaction N + 1 already started and another task, task A, just called
+   btrfs_commit_transaction() on it;
+
+3) Block group B was dirtied (extents allocated from it) by transaction
+   N + 1, so when task A calls btrfs_start_dirty_block_groups(), at the
+   very beginning of the transaction commit, it starts writeback for the
+   block group's space cache by calling btrfs_write_out_cache(), which
+   allocates the pages array for the block group's io_ctl with a call to
+   io_ctl_init(). Block group A is added to the io_list of transaction
+   N + 1 by btrfs_start_dirty_block_groups();
+
+4) While transaction N's commit is writing out the extent buffers, it gets
+   an IO error and aborts transaction N, also setting the file system to
+   RO mode;
+
+5) Task A has already returned from btrfs_start_dirty_block_groups(), is at
+   btrfs_commit_transaction() and has set transaction N + 1 state to
+   TRANS_STATE_COMMIT_START. Immediately after that it checks that the
+   filesystem was turned to RO mode, due to transaction N's abort, and
+   jumps to the "cleanup_transaction" label. After that we end up at
+   btrfs_cleanup_one_transaction() which calls btrfs_cleanup_dirty_bgs().
+   That helper finds block group B in the transaction's io_list but it
+   never releases the pages array of the block group's io_ctl, resulting in
+   a memory leak.
+
+In fact at the point when we are at btrfs_cleanup_dirty_bgs(), the pages
+array points to pages that were already released by us at
+__btrfs_write_out_cache() through the call to io_ctl_drop_pages(). We end
+up freeing the pages array only after waiting for the ordered extent to
+complete through btrfs_wait_cache_io(), which calls io_ctl_free() to do
+that. But in the transaction abort case we don't wait for the space cache's
+ordered extent to complete through a call to btrfs_wait_cache_io(), so
+that's why we end up with a memory leak - we wait for the ordered extent
+to complete indirectly by shutting down the work queues and waiting for
+any jobs in them to complete before returning from close_ctree().
+
+We can solve the leak simply by freeing the pages array right after
+releasing the pages (with the call to io_ctl_drop_pages()) at
+__btrfs_write_out_cache(), since we will never use it anymore after that
+and the pages array points to already released pages at that point, which
+is currently not a problem since no one will use it after that, but not a
+good practice anyway since it can easily lead to use-after-free issues.
+
+So fix this by freeing the pages array right after releasing the pages at
+__btrfs_write_out_cache().
+
+This issue can often be reproduced with test case generic/475 from fstests
+and kmemleak can detect it and reports it with the following trace:
+
+unreferenced object 0xffff9bbf009fa600 (size 512):
+  comm "fsstress", pid 38807, jiffies 4298504428 (age 22.028s)
+  hex dump (first 32 bytes):
+    00 a0 7c 4d 3d ed ff ff 40 a0 7c 4d 3d ed ff ff  ..|M=...@.|M=...
+    80 a0 7c 4d 3d ed ff ff c0 a0 7c 4d 3d ed ff ff  ..|M=.....|M=...
+  backtrace:
+    [<00000000f4b5cfe2>] __kmalloc+0x1a8/0x3e0
+    [<0000000028665e7f>] io_ctl_init+0xa7/0x120 [btrfs]
+    [<00000000a1f95b2d>] __btrfs_write_out_cache+0x86/0x4a0 [btrfs]
+    [<00000000207ea1b0>] btrfs_write_out_cache+0x7f/0xf0 [btrfs]
+    [<00000000af21f534>] btrfs_start_dirty_block_groups+0x27b/0x580 [btrfs]
+    [<00000000c3c23d44>] btrfs_commit_transaction+0xa6f/0xe70 [btrfs]
+    [<000000009588930c>] create_subvol+0x581/0x9a0 [btrfs]
+    [<000000009ef2fd7f>] btrfs_mksubvol+0x3fb/0x4a0 [btrfs]
+    [<00000000474e5187>] __btrfs_ioctl_snap_create+0x119/0x1a0 [btrfs]
+    [<00000000708ee349>] btrfs_ioctl_snap_create_v2+0xb0/0xf0 [btrfs]
+    [<00000000ea60106f>] btrfs_ioctl+0x12c/0x3130 [btrfs]
+    [<000000005c923d6d>] __x64_sys_ioctl+0x83/0xb0
+    [<0000000043ace2c9>] do_syscall_64+0x33/0x80
+    [<00000000904efbce>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+CC: stable@vger.kernel.org # 4.9+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c          |    1 +
+ fs/btrfs/free-space-cache.c |    2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4574,6 +4574,7 @@ static void btrfs_cleanup_bg_io(struct b
+               cache->io_ctl.inode = NULL;
+               iput(inode);
+       }
++      ASSERT(cache->io_ctl.pages == NULL);
+       btrfs_put_block_group(cache);
+ }
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1186,7 +1186,6 @@ static int __btrfs_wait_cache_io(struct
+       ret = update_cache_item(trans, root, inode, path, offset,
+                               io_ctl->entries, io_ctl->bitmaps);
+ out:
+-      io_ctl_free(io_ctl);
+       if (ret) {
+               invalidate_inode_pages2(inode->i_mapping);
+               BTRFS_I(inode)->generation = 0;
+@@ -1346,6 +1345,7 @@ static int __btrfs_write_out_cache(struc
+        * them out later
+        */
+       io_ctl_drop_pages(io_ctl);
++      io_ctl_free(io_ctl);
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+                            i_size_read(inode) - 1, &cached_state);
diff --git a/queue-5.8/btrfs-reset-compression-level-for-lzo-on-remount.patch b/queue-5.8/btrfs-reset-compression-level-for-lzo-on-remount.patch
new file mode 100644 (file)
index 0000000..11d4535
--- /dev/null
@@ -0,0 +1,48 @@
+From 282dd7d7718444679b046b769d872b188818ca35 Mon Sep 17 00:00:00 2001
+From: Marcos Paulo de Souza <mpdesouza@suse.com>
+Date: Mon, 3 Aug 2020 16:55:01 -0300
+Subject: btrfs: reset compression level for lzo on remount
+
+From: Marcos Paulo de Souza <mpdesouza@suse.com>
+
+commit 282dd7d7718444679b046b769d872b188818ca35 upstream.
+
+Currently a user can set mount "-o compress" which will set the
+compression algorithm to zlib, and use the default compress level for
+zlib (3):
+
+  relatime,compress=zlib:3,space_cache
+
+If the user remounts the fs using "-o compress=lzo", then the old
+compress_level is used:
+
+  relatime,compress=lzo:3,space_cache
+
+But lzo does not expose any tunable compression level. The same happens
+if we set any compress argument with different level, also with zstd.
+
+Fix this by resetting the compress_level when compress=lzo is
+specified.  With the fix applied, lzo is shown without compress level:
+
+  relatime,compress=lzo,space_cache
+
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Marcos Paulo de Souza <mpdesouza@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/super.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -558,6 +558,7 @@ int btrfs_parse_options(struct btrfs_fs_
+                       } else if (strncmp(args[0].from, "lzo", 3) == 0) {
+                               compress_type = "lzo";
+                               info->compress_type = BTRFS_COMPRESS_LZO;
++                              info->compress_level = 0;
+                               btrfs_set_opt(info->mount_opt, COMPRESS);
+                               btrfs_clear_opt(info->mount_opt, NODATACOW);
+                               btrfs_clear_opt(info->mount_opt, NODATASUM);
diff --git a/queue-5.8/fbcon-prevent-user-font-height-or-width-change-from-causing-potential-out-of-bounds-access.patch b/queue-5.8/fbcon-prevent-user-font-height-or-width-change-from-causing-potential-out-of-bounds-access.patch
new file mode 100644 (file)
index 0000000..608e7eb
--- /dev/null
@@ -0,0 +1,79 @@
+From 39b3cffb8cf3111738ea993e2757ab382253d86a Mon Sep 17 00:00:00 2001
+From: George Kennedy <george.kennedy@oracle.com>
+Date: Fri, 31 Jul 2020 12:33:11 -0400
+Subject: fbcon: prevent user font height or width change from causing potential out-of-bounds access
+
+From: George Kennedy <george.kennedy@oracle.com>
+
+commit 39b3cffb8cf3111738ea993e2757ab382253d86a upstream.
+
+Add a check to fbcon_resize() to ensure that a possible change to user font
+height or user font width will not allow a font data out-of-bounds access.
+NOTE: must use original charcount in calculation as font charcount can
+change and cannot be used to determine the font data allocated size.
+
+Signed-off-by: George Kennedy <george.kennedy@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Reported-by: syzbot+38a3699c7eaf165b97a6@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/1596213192-6635-1-git-send-email-george.kennedy@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/core/fbcon.c |   25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2191,6 +2191,9 @@ static void updatescrollmode(struct fbco
+       }
+ }
++#define PITCH(w) (((w) + 7) >> 3)
++#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
++
+ static int fbcon_resize(struct vc_data *vc, unsigned int width, 
+                       unsigned int height, unsigned int user)
+ {
+@@ -2200,6 +2203,24 @@ static int fbcon_resize(struct vc_data *
+       struct fb_var_screeninfo var = info->var;
+       int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
++      if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
++              int size;
++              int pitch = PITCH(vc->vc_font.width);
++
++              /*
++               * If user font, ensure that a possible change to user font
++               * height or width will not allow a font data out-of-bounds access.
++               * NOTE: must use original charcount in calculation as font
++               * charcount can change and cannot be used to determine the
++               * font data allocated size.
++               */
++              if (pitch <= 0)
++                      return -EINVAL;
++              size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
++              if (size > FNTSIZE(vc->vc_font.data))
++                      return -EINVAL;
++      }
++
+       virt_w = FBCON_SWAP(ops->rotate, width, height);
+       virt_h = FBCON_SWAP(ops->rotate, height, width);
+       virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
+@@ -2652,7 +2673,7 @@ static int fbcon_set_font(struct vc_data
+       int size;
+       int i, csum;
+       u8 *new_data, *data = font->data;
+-      int pitch = (font->width+7) >> 3;
++      int pitch = PITCH(font->width);
+       /* Is there a reason why fbconsole couldn't handle any charcount >256?
+        * If not this check should be changed to charcount < 256 */
+@@ -2668,7 +2689,7 @@ static int fbcon_set_font(struct vc_data
+       if (fbcon_invalid_charcount(info, charcount))
+               return -EINVAL;
+-      size = h * pitch * charcount;
++      size = CALC_FONTSZ(h, pitch, charcount);
+       new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
diff --git a/queue-5.8/hid-i2c-hid-always-sleep-60ms-after-i2c_hid_pwr_on-commands.patch b/queue-5.8/hid-i2c-hid-always-sleep-60ms-after-i2c_hid_pwr_on-commands.patch
new file mode 100644 (file)
index 0000000..c0cd7fc
--- /dev/null
@@ -0,0 +1,93 @@
+From eef4016243e94c438f177ca8226876eb873b9c75 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 11 Aug 2020 15:39:58 +0200
+Subject: HID: i2c-hid: Always sleep 60ms after I2C_HID_PWR_ON commands
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit eef4016243e94c438f177ca8226876eb873b9c75 upstream.
+
+Before this commit i2c_hid_parse() consists of the following steps:
+
+1. Send power on cmd
+2. usleep_range(1000, 5000)
+3. Send reset cmd
+4. Wait for reset to complete (device interrupt, or msleep(100))
+5. Send power on cmd
+6. Try to read HID descriptor
+
+Notice how there is an usleep_range(1000, 5000) after the first power-on
+command, but not after the second power-on command.
+
+Testing has shown that at least on the BMAX Y13 laptop's i2c-hid touchpad,
+not having a delay after the second power-on command causes the HID
+descriptor to read as all zeros.
+
+In case we hit this on other devices too, the descriptor being all zeros
+can be recognized by the following message being logged many, many times:
+
+hid-generic 0018:0911:5288.0002: unknown main item tag 0x0
+
+At the same time as the BMAX Y13's touchpad issue was debugged,
+Kai-Heng was working on debugging some issues with Goodix i2c-hid
+touchpads. It turns out that these need a delay after a PWR_ON command
+too, otherwise they stop working after a suspend/resume cycle.
+According to Goodix a delay of minimal 60ms is needed.
+
+Having multiple cases where we need a delay after sending the power-on
+command, seems to indicate that we should always sleep after the power-on
+command.
+
+This commit fixes the mentioned issues by moving the existing 1ms sleep to
+the i2c_hid_set_power() function and changing it to a 60ms sleep.
+
+Cc: stable@vger.kernel.org
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208247
+Reported-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Reported-and-tested-by: Andrea Borgia <andrea@borgia.bo.it>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/i2c-hid/i2c-hid-core.c |   22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -420,6 +420,19 @@ static int i2c_hid_set_power(struct i2c_
+               dev_err(&client->dev, "failed to change power setting.\n");
+ set_pwr_exit:
++
++      /*
++       * The HID over I2C specification states that if a DEVICE needs time
++       * after the PWR_ON request, it should utilise CLOCK stretching.
++       * However, it has been observered that the Windows driver provides a
++       * 1ms sleep between the PWR_ON and RESET requests.
++       * According to Goodix Windows even waits 60 ms after (other?)
++       * PWR_ON requests. Testing has confirmed that several devices
++       * will not work properly without a delay after a PWR_ON request.
++       */
++      if (!ret && power_state == I2C_HID_PWR_ON)
++              msleep(60);
++
+       return ret;
+ }
+@@ -441,15 +454,6 @@ static int i2c_hid_hwreset(struct i2c_cl
+       if (ret)
+               goto out_unlock;
+-      /*
+-       * The HID over I2C specification states that if a DEVICE needs time
+-       * after the PWR_ON request, it should utilise CLOCK stretching.
+-       * However, it has been observered that the Windows driver provides a
+-       * 1ms sleep between the PWR_ON and RESET requests and that some devices
+-       * rely on this.
+-       */
+-      usleep_range(1000, 5000);
+-
+       i2c_hid_dbg(ihid, "resetting...\n");
+       ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/queue-5.8/hwmon-gsc-hwmon-scale-temperature-to-millidegrees.patch b/queue-5.8/hwmon-gsc-hwmon-scale-temperature-to-millidegrees.patch
new file mode 100644 (file)
index 0000000..111141f
--- /dev/null
@@ -0,0 +1,33 @@
+From c1ae18d313e24bc7833e1749dd36dba5d47f259c Mon Sep 17 00:00:00 2001
+From: Tim Harvey <tharvey@gateworks.com>
+Date: Thu, 27 Aug 2020 10:20:24 -0700
+Subject: hwmon: (gsc-hwmon) Scale temperature to millidegrees
+
+From: Tim Harvey <tharvey@gateworks.com>
+
+commit c1ae18d313e24bc7833e1749dd36dba5d47f259c upstream.
+
+The GSC registers report temperature in decidegrees celcius so we
+need to scale it to represent the hwmon sysfs API of millidegrees.
+
+Cc: stable@vger.kernel.org
+Fixes: 3bce5377ef66 ("hwmon: Add Gateworks System Controller support")
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+Link: https://lore.kernel.org/r/1598548824-16898-1-git-send-email-tharvey@gateworks.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/gsc-hwmon.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/hwmon/gsc-hwmon.c
++++ b/drivers/hwmon/gsc-hwmon.c
+@@ -172,6 +172,7 @@ gsc_hwmon_read(struct device *dev, enum
+       case mode_temperature:
+               if (tmp > 0x8000)
+                       tmp -= 0xffff;
++              tmp *= 100; /* convert to millidegrees celsius */
+               break;
+       case mode_voltage_raw:
+               tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
index f1ca8cf5753d0461f943e6c9b3369eb0592efa0f..4005e32e1dc77ec8ba910313e814ebe5bc3bad7a 100644 (file)
@@ -164,3 +164,13 @@ libbpf-fix-map-index-used-in-error-message.patch
 bpf-selftests-global_funcs-check-err_str-before-strs.patch
 arm64-move-handling-of-erratum-1418040-into-c-code.patch
 arm64-allow-booting-of-late-cpus-affected-by-erratum.patch
+hwmon-gsc-hwmon-scale-temperature-to-millidegrees.patch
+block-fix-get_max_io_size.patch
+block-loop-set-discard-granularity-and-alignment-for-block-device-backed-loop.patch
+hid-i2c-hid-always-sleep-60ms-after-i2c_hid_pwr_on-commands.patch
+blk-mq-order-adding-requests-to-hctx-dispatch-and-checking-sched_restart.patch
+btrfs-reset-compression-level-for-lzo-on-remount.patch
+btrfs-check-the-right-error-variable-in-btrfs_del_dir_entries_in_log.patch
+btrfs-fix-space-cache-memory-leak-after-transaction-abort.patch
+btrfs-detect-nocow-for-swap-after-snapshot-delete.patch
+fbcon-prevent-user-font-height-or-width-change-from-causing-potential-out-of-bounds-access.patch