4.17-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2018 08:03:11 +0000 (10:03 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2018 08:03:11 +0000 (10:03 +0200)
added patches:
blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch
block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch
block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch
block-reset-bi_iter.bi_done-after-splitting-bio.patch
ext4-check-for-allocation-block-validity-with-block-group-locked.patch
ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
ext4-fix-false-negatives-and-false-positives-in-ext4_check_descriptors.patch
ext4-fix-inline-data-updates-with-checksums-enabled.patch
gpio-of-handle-fixed-regulator-flags-properly.patch
gpio-uniphier-set-legitimate-irq-trigger-type-in-.to_irq-hook.patch
i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch
nvmet-fc-fix-target-sgl-list-on-large-transfers.patch
random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
squashfs-be-more-careful-about-metadata-corruption.patch

15 files changed:
queue-4.17/blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch [new file with mode: 0644]
queue-4.17/block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch [new file with mode: 0644]
queue-4.17/block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch [new file with mode: 0644]
queue-4.17/block-reset-bi_iter.bi_done-after-splitting-bio.patch [new file with mode: 0644]
queue-4.17/ext4-check-for-allocation-block-validity-with-block-group-locked.patch [new file with mode: 0644]
queue-4.17/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch [new file with mode: 0644]
queue-4.17/ext4-fix-false-negatives-and-false-positives-in-ext4_check_descriptors.patch [new file with mode: 0644]
queue-4.17/ext4-fix-inline-data-updates-with-checksums-enabled.patch [new file with mode: 0644]
queue-4.17/gpio-of-handle-fixed-regulator-flags-properly.patch [new file with mode: 0644]
queue-4.17/gpio-uniphier-set-legitimate-irq-trigger-type-in-.to_irq-hook.patch [new file with mode: 0644]
queue-4.17/i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch [new file with mode: 0644]
queue-4.17/nvmet-fc-fix-target-sgl-list-on-large-transfers.patch [new file with mode: 0644]
queue-4.17/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch [new file with mode: 0644]
queue-4.17/series
queue-4.17/squashfs-be-more-careful-about-metadata-corruption.patch [new file with mode: 0644]

diff --git a/queue-4.17/blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch b/queue-4.17/blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch
new file mode 100644 (file)
index 0000000..0d0351b
--- /dev/null
@@ -0,0 +1,49 @@
+From 9362dd1109f87a9d0a798fbc890cb339c171ed35 Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:08 +0200
+Subject: blkdev: __blkdev_direct_IO_simple: fix leak in error case
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit 9362dd1109f87a9d0a798fbc890cb339c171ed35 upstream.
+
+Fixes: 72ecad22d9f1 ("block: support a full bio worth of IO for simplified bdev direct-io")
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/block_dev.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -219,7 +219,7 @@ __blkdev_direct_IO_simple(struct kiocb *
+       ret = bio_iov_iter_get_pages(&bio, iter);
+       if (unlikely(ret))
+-              return ret;
++              goto out;
+       ret = bio.bi_iter.bi_size;
+       if (iov_iter_rw(iter) == READ) {
+@@ -248,12 +248,13 @@ __blkdev_direct_IO_simple(struct kiocb *
+               put_page(bvec->bv_page);
+       }
+-      if (vecs != inline_vecs)
+-              kfree(vecs);
+-
+       if (unlikely(bio.bi_status))
+               ret = blk_status_to_errno(bio.bi_status);
++out:
++      if (vecs != inline_vecs)
++              kfree(vecs);
++
+       bio_uninit(&bio);
+       return ret;
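
The fix is the classic single-exit cleanup idiom: the early error return is
routed through the label that frees the conditionally heap-allocated vector
array, so failure paths release it just like the success path. A minimal
userspace sketch of the idiom, with illustrative names (do_io, MAX_INLINE)
rather than kernel API:

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_INLINE 16

    static int do_io(int nvecs, int simulate_error)
    {
        int inline_vecs[MAX_INLINE];
        int *vecs = inline_vecs;
        int ret = 0;

        if (nvecs > MAX_INLINE) {
            vecs = malloc(nvecs * sizeof(*vecs));
            if (!vecs)
                return -ENOMEM;
        }

        if (simulate_error) {
            ret = -EFAULT;  /* a bare "return ret;" here leaked vecs */
            goto out;       /* fixed: take the common cleanup path */
        }
        /* ... perform the I/O using vecs ... */
    out:
        if (vecs != inline_vecs)
            free(vecs);     /* now freed on error paths too */
        return ret;
    }
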
diff --git a/queue-4.17/block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch b/queue-4.17/block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch
new file mode 100644 (file)
index 0000000..2d49dd1
--- /dev/null
@@ -0,0 +1,76 @@
+From b403ea2404889e1227812fa9657667a1deb9c694 Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:07 +0200
+Subject: block: bio_iov_iter_get_pages: fix size of last iovec
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit b403ea2404889e1227812fa9657667a1deb9c694 upstream.
+
+If the last page of the bio is not "full", the length of the last
+vector slot needs to be corrected. This slot has the index
+(bio->bi_vcnt - 1), but only in bio->bi_io_vec. In the "bv" helper
+array, which is shifted by the value of bio->bi_vcnt at function
+invocation, the correct index is (nr_pages - 1).
+
+v2: improved readability following suggestions from Ming Lei.
+v3: followed a formatting suggestion from Christoph Hellwig.
+
+Fixes: 2cefe4dbaadf ("block: add bio_iov_iter_get_pages()")
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |   18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -881,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page);
+  */
+ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+-      unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
++      unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+       struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+       struct page **pages = (struct page **)bv;
+-      size_t offset, diff;
++      size_t offset;
+       ssize_t size;
+       size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
+       if (unlikely(size <= 0))
+               return size ? size : -EFAULT;
+-      nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
++      idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       /*
+        * Deep magic below:  We need to walk the pinned pages backwards
+@@ -903,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *b
+       bio->bi_iter.bi_size += size;
+       bio->bi_vcnt += nr_pages;
+-      diff = (nr_pages * PAGE_SIZE - offset) - size;
+-      while (nr_pages--) {
+-              bv[nr_pages].bv_page = pages[nr_pages];
+-              bv[nr_pages].bv_len = PAGE_SIZE;
+-              bv[nr_pages].bv_offset = 0;
++      while (idx--) {
++              bv[idx].bv_page = pages[idx];
++              bv[idx].bv_len = PAGE_SIZE;
++              bv[idx].bv_offset = 0;
+       }
+       bv[0].bv_offset += offset;
+       bv[0].bv_len -= offset;
+-      if (diff)
+-              bv[bio->bi_vcnt - 1].bv_len -= diff;
++      bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
+       iov_iter_advance(iter, size);
+       return 0;
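
The corrected arithmetic is easy to verify with concrete numbers: the unused
tail of the last page is nr_pages * PAGE_SIZE - offset - size, and it is
subtracted from slot nr_pages - 1 of the shifted helper array (not
bio->bi_vcnt - 1). A standalone check, with PAGE_SZ standing in for PAGE_SIZE:

    #include <stdio.h>

    #define PAGE_SZ 4096UL

    int main(void)
    {
        unsigned long offset = 512, size = 10000;    /* sample I/O */
        unsigned long nr_pages = (size + offset + PAGE_SZ - 1) / PAGE_SZ;
        unsigned long tail = nr_pages * PAGE_SZ - offset - size;

        unsigned long first = PAGE_SZ - offset;      /* bv[0] */
        unsigned long last = PAGE_SZ - tail;         /* bv[nr_pages - 1] */

        /* 3584 + 4096 + 2320 == 10000: the lengths cover exactly 'size' */
        printf("pages=%lu first=%lu last=%lu\n", nr_pages, first, last);
        return 0;
    }
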
diff --git a/queue-4.17/block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch b/queue-4.17/block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch
new file mode 100644 (file)
index 0000000..c5de12d
--- /dev/null
@@ -0,0 +1,96 @@
+From 17d51b10d7773e4618bcac64648f30f12d4078fb Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:09 +0200
+Subject: block: bio_iov_iter_get_pages: pin more pages for multi-segment IOs
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit 17d51b10d7773e4618bcac64648f30f12d4078fb upstream.
+
+bio_iov_iter_get_pages() currently only adds pages for the next non-zero
+segment from the iov_iter to the bio. That's suboptimal for callers,
+which typically try to pin as many pages as fit into the bio. This patch
+converts the current bio_iov_iter_get_pages() into a static helper, and
+introduces a new helper that allocates as many pages as
+
+ 1) fit into the bio,
+ 2) are present in the iov_iter,
+ 3) and can be pinned by MM.
+
+Error is returned only if zero pages could be pinned. Because of 3), a
+zero return value doesn't necessarily mean all pages have been pinned.
+Callers that have to pin every page in the iov_iter must still call this
+function in a loop (this is currently the case).
+
+This change matters most for __blkdev_direct_IO_simple(), which calls
+bio_iov_iter_get_pages() only once. If it obtains fewer pages than
+requested, it returns a "short write" or "short read", and
+__generic_file_write_iter() falls back to buffered writes, which may
+lead to data corruption.
+
+Fixes: 72ecad22d9f1 ("block: support a full bio worth of IO for simplified bdev direct-io")
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |   35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -872,14 +872,16 @@ done:
+ EXPORT_SYMBOL(bio_add_page);
+ /**
+- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+  * @bio: bio to add pages to
+  * @iter: iov iterator describing the region to be mapped
+  *
+- * Pins as many pages from *iter and appends them to @bio's bvec array. The
++ * Pins pages from *iter and appends them to @bio's bvec array. The
+  * pages will have to be released using put_page() when done.
++ * For multi-segment *iter, this function only adds pages from
++ * the next non-empty segment of the iov iterator.
+  */
+-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+       struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+@@ -916,6 +918,33 @@ int bio_iov_iter_get_pages(struct bio *b
+       iov_iter_advance(iter, size);
+       return 0;
+ }
++
++/**
++ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * @bio: bio to add pages to
++ * @iter: iov iterator describing the region to be mapped
++ *
++ * Pins pages from *iter and appends them to @bio's bvec array. The
++ * pages will have to be released using put_page() when done.
++ * The function tries, but does not guarantee, to pin as many pages as
++ * fit into the bio, or are requested in *iter, whatever is smaller.
++ * If MM encounters an error pinning the requested pages, it stops.
++ * Error is returned only if 0 pages could be pinned.
++ */
++int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++{
++      unsigned short orig_vcnt = bio->bi_vcnt;
++
++      do {
++              int ret = __bio_iov_iter_get_pages(bio, iter);
++
++              if (unlikely(ret))
++                      return bio->bi_vcnt > orig_vcnt ? 0 : ret;
++
++      } while (iov_iter_count(iter) && !bio_full(bio));
++
++      return 0;
++}
+ EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+ static void submit_bio_wait_endio(struct bio *bio)
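
The new wrapper is a simple retry loop around the single-segment helper: keep
filling until the bio is full or the iterator is drained, and report an error
only when no progress at all was made. Its shape, reduced to a self-contained
sketch in which struct source, struct container and fill_one() are
hypothetical stand-ins for the iov_iter, the bio and
__bio_iov_iter_get_pages():

    #include <stddef.h>

    struct source { size_t nsegs; };                /* stands in for iov_iter */
    struct container { unsigned short cnt, max; };  /* stands in for bio */

    static int fill_one(struct container *c, struct source *s)
    {
        if (s->nsegs == 0)
            return -1;  /* nothing could be pinned */
        s->nsegs--;     /* consume one non-empty segment */
        c->cnt++;       /* and account its pages */
        return 0;
    }

    int get_all(struct container *c, struct source *s)
    {
        unsigned short orig_cnt = c->cnt;

        do {
            int ret = fill_one(c, s);

            /* partial progress still counts as success */
            if (ret)
                return c->cnt > orig_cnt ? 0 : ret;
        } while (s->nsegs && c->cnt < c->max);

        return 0;
    }
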
diff --git a/queue-4.17/block-reset-bi_iter.bi_done-after-splitting-bio.patch b/queue-4.17/block-reset-bi_iter.bi_done-after-splitting-bio.patch
new file mode 100644 (file)
index 0000000..6ce6f0e
--- /dev/null
@@ -0,0 +1,34 @@
+From 5151842b9d8732d4cbfa8400b40bff894f501b2f Mon Sep 17 00:00:00 2001
+From: Greg Edwards <gedwards@ddn.com>
+Date: Thu, 26 Jul 2018 14:39:37 -0400
+Subject: block: reset bi_iter.bi_done after splitting bio
+
+From: Greg Edwards <gedwards@ddn.com>
+
+commit 5151842b9d8732d4cbfa8400b40bff894f501b2f upstream.
+
+After the bio has been updated to represent the remaining sectors, reset
+bi_done so bio_rewind_iter() does not rewind further than it should.
+
+This resolves a bio_integrity_process() failure on reads where the
+original request was split.
+
+Fixes: 63573e359d05 ("bio-integrity: Restore original iterator on verify stage")
+Signed-off-by: Greg Edwards <gedwards@ddn.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1806,6 +1806,7 @@ struct bio *bio_split(struct bio *bio, i
+               bio_integrity_trim(split);
+       bio_advance(bio, split->bi_iter.bi_size);
++      bio->bi_iter.bi_done = 0;
+       if (bio_flagged(bio, BIO_TRACE_COMPLETION))
+               bio_set_flag(split, BIO_TRACE_COMPLETION);
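
Why the one-line reset matters: a rewind is bounded by the done counter, so a
count carried over from before the split would let bio_rewind_iter() back up
past the new start of the remainder. A toy iterator showing the invariant
(names are illustrative, not block-layer API):

    struct toy_iter { unsigned int pos, done; };

    static void advance(struct toy_iter *it, unsigned int bytes)
    {
        it->pos += bytes;
        it->done += bytes;
    }

    void rewind_iter(struct toy_iter *it, unsigned int bytes)
    {
        /* callers rely on bytes <= it->done */
        it->pos -= bytes;
        it->done -= bytes;
    }

    void split_off(struct toy_iter *it, unsigned int split_bytes)
    {
        advance(it, split_bytes);   /* the remainder begins here ... */
        it->done = 0;               /* ... and must never rewind past it */
    }
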
diff --git a/queue-4.17/ext4-check-for-allocation-block-validity-with-block-group-locked.patch b/queue-4.17/ext4-check-for-allocation-block-validity-with-block-group-locked.patch
new file mode 100644 (file)
index 0000000..26d8c2d
--- /dev/null
@@ -0,0 +1,69 @@
+From 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 12 Jul 2018 19:08:05 -0400
+Subject: ext4: check for allocation block validity with block group locked
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef upstream.
+
+With commit 044e6e3d74a3: "ext4: don't update checksum of new
+initialized bitmaps" the buffer valid bit will get set without
+actually setting up the checksum for the allocation bitmap, since the
+checksum will get calculated once we actually allocate an inode or
+block.
+
+If we are doing this, then we need to (re-)check the verified bit
+after we take the block group lock.  Otherwise, we could race with
+another process reading and verifying the bitmap, which would then
+complain about the checksum being invalid.
+
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1780137
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/balloc.c |    3 +++
+ fs/ext4/ialloc.c |    3 +++
+ 2 files changed, 6 insertions(+)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -379,6 +379,8 @@ static int ext4_validate_block_bitmap(st
+               return -EFSCORRUPTED;
+       ext4_lock_group(sb, block_group);
++      if (buffer_verified(bh))
++              goto verified;
+       if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+                       desc, bh))) {
+               ext4_unlock_group(sb, block_group);
+@@ -401,6 +403,7 @@ static int ext4_validate_block_bitmap(st
+               return -EFSCORRUPTED;
+       }
+       set_buffer_verified(bh);
++verified:
+       ext4_unlock_group(sb, block_group);
+       return 0;
+ }
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -91,6 +91,8 @@ static int ext4_validate_inode_bitmap(st
+               return -EFSCORRUPTED;
+       ext4_lock_group(sb, block_group);
++      if (buffer_verified(bh))
++              goto verified;
+       blk = ext4_inode_bitmap(sb, desc);
+       if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
+@@ -108,6 +110,7 @@ static int ext4_validate_inode_bitmap(st
+               return -EFSBADCRC;
+       }
+       set_buffer_verified(bh);
++verified:
+       ext4_unlock_group(sb, block_group);
+       return 0;
+ }
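
This is the familiar recheck-under-lock pattern: another task may verify the
bitmap (and compute its checksum) between our unlocked test and our taking of
the group lock, so the verified bit has to be tested again once the lock is
held. A compact pthreads sketch of the same pattern, with struct bitmap and
csum_ok() as stand-ins for the ext4 structures:

    #include <pthread.h>
    #include <stdbool.h>

    struct bitmap {
        pthread_mutex_t lock;   /* assumed PTHREAD_MUTEX_INITIALIZER */
        bool verified;
    };

    static bool csum_ok(struct bitmap *b) { (void)b; return true; }

    int validate(struct bitmap *b)
    {
        pthread_mutex_lock(&b->lock);
        if (b->verified)        /* the re-check the patch adds */
            goto verified;
        if (!csum_ok(b)) {
            pthread_mutex_unlock(&b->lock);
            return -1;          /* ext4 would return -EFSBADCRC */
        }
        b->verified = true;
    verified:
        pthread_mutex_unlock(&b->lock);
        return 0;
    }
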
diff --git a/queue-4.17/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch b/queue-4.17/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
new file mode 100644 (file)
index 0000000..e5b046a
--- /dev/null
@@ -0,0 +1,71 @@
+From 5012284700775a4e6e3fbe7eac4c543c4874b559 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 28 Jul 2018 08:12:04 -0400
+Subject: ext4: fix check to prevent initializing reserved inodes
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 5012284700775a4e6e3fbe7eac4c543c4874b559 upstream.
+
+Commit 8844618d8aa7: "ext4: only look at the bg_flags field if it is
+valid" will complain if block group zero does not have the
+EXT4_BG_INODE_ZEROED flag set.  Unfortunately, this is not correct,
+since a freshly created file system has this flag cleared.  It gets
+set almost immediately after the file system is mounted read-write, but
+the following somewhat unlikely sequence will end up triggering a
+false positive report of a corrupted file system:
+
+   mkfs.ext4 /dev/vdc
+   mount -o ro /dev/vdc /vdc
+   mount -o remount,rw /dev/vdc
+
+Instead, when initializing the inode table for block group zero, test
+to make sure that itable_unused count is not too large, since that is
+the case that will result in some or all of the reserved inodes
+getting cleared.
+
+This fixes the failures reported by Eric Whitney when running
+generic/230 and generic/231 in the nojournal test case.
+
+Fixes: 8844618d8aa7 ("ext4: only look at the bg_flags field if it is valid")
+Reported-by: Eric Whitney <enwlinux@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ialloc.c |    5 ++++-
+ fs/ext4/super.c  |    8 +-------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1395,7 +1395,10 @@ int ext4_init_inode_table(struct super_b
+                           ext4_itable_unused_count(sb, gdp)),
+                           sbi->s_inodes_per_block);
+-      if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
++      if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
++          ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
++                             ext4_itable_unused_count(sb, gdp)) <
++                            EXT4_FIRST_INO(sb)))) {
+               ext4_error(sb, "Something is wrong with group %u: "
+                          "used itable blocks: %d; "
+                          "itable unused count: %u",
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3106,14 +3106,8 @@ static ext4_group_t ext4_has_uninit_itab
+               if (!gdp)
+                       continue;
+-              if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+-                      continue;
+-              if (group != 0)
++              if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+                       break;
+-              ext4_error(sb, "Inode table for bg 0 marked as "
+-                         "needing zeroing");
+-              if (sb_rdonly(sb))
+-                      return ngroups;
+       }
+       return group;
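
The new group-0 test is plain arithmetic: the number of initialized inodes
(inodes per group minus itable_unused) must at least cover the reserved
inodes below EXT4_FIRST_INO, or zeroing the "unused" tail would wipe some of
them. Worked through with made-up but typical numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned inodes_per_group = 8192;   /* common mkfs default */
        unsigned first_ino = 11;            /* first non-reserved inode */
        unsigned itable_unused = 8185;      /* from a corrupt descriptor */

        /* used = 8192 - 8185 = 7 < 11: zeroing the "unused" tail of
         * the inode table would wipe reserved inodes 8..10 */
        if (inodes_per_group - itable_unused < first_ino)
            printf("group 0 descriptor is corrupt\n");
        return 0;
    }
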
diff --git a/queue-4.17/ext4-fix-false-negatives-and-false-positives-in-ext4_check_descriptors.patch b/queue-4.17/ext4-fix-false-negatives-and-false-positives-in-ext4_check_descriptors.patch
new file mode 100644 (file)
index 0000000..5b61532
--- /dev/null
@@ -0,0 +1,56 @@
+From 44de022c4382541cebdd6de4465d1f4f465ff1dd Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sun, 8 Jul 2018 19:35:02 -0400
+Subject: ext4: fix false negatives *and* false positives in ext4_check_descriptors()
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 44de022c4382541cebdd6de4465d1f4f465ff1dd upstream.
+
+ext4_check_descriptors() was getting called before s_gdb_count was
+initialized.  So for file systems without the meta_bg feature,
+allocation bitmaps could overlap the block group descriptors and ext4
+wouldn't notice.
+
+For file systems with the meta_bg feature enabled, there was a
+fencepost error which would cause the ext4_check_descriptors() to
+incorrectly believe that the block allocation bitmap overlaps with the
+block group descriptor blocks, and it would reject the mount.
+
+Fix both of these problems.
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2307,7 +2307,7 @@ static int ext4_check_descriptors(struct
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+       ext4_fsblk_t last_block;
+-      ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
++      ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
+       ext4_fsblk_t block_bitmap;
+       ext4_fsblk_t inode_bitmap;
+       ext4_fsblk_t inode_table;
+@@ -4050,14 +4050,13 @@ static int ext4_fill_super(struct super_
+                       goto failed_mount2;
+               }
+       }
++      sbi->s_gdb_count = db_count;
+       if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+               ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+               ret = -EFSCORRUPTED;
+               goto failed_mount2;
+       }
+-      sbi->s_gdb_count = db_count;
+-
+       timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
+       /* Register extent status tree shrinker */
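
The meta_bg half of the fix is a fencepost: the superblock at sb_block plus
ext4_bg_num_gdb() descriptor blocks end at block sb_block +
ext4_bg_num_gdb(), so the extra "+ 1" pushed the bound one block too far and
flagged a bitmap placed immediately afterwards. A toy check, assuming (as the
code does) an inclusive overlap comparison against last_bg_block:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sb_block = 0, num_gdb = 64;

        /* blocks sb_block .. sb_block + num_gdb hold the superblock
         * plus the descriptors, so the last metadata block is: */
        unsigned long last_bg_block = sb_block + num_gdb;   /* fixed */
        unsigned long bitmap = sb_block + num_gdb + 1;      /* legal */

        /* with the old "+ 1" bound this legal bitmap looked overlapping */
        printf("overlap=%d\n", bitmap <= last_bg_block);    /* 0 */
        return 0;
    }
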
diff --git a/queue-4.17/ext4-fix-inline-data-updates-with-checksums-enabled.patch b/queue-4.17/ext4-fix-inline-data-updates-with-checksums-enabled.patch
new file mode 100644 (file)
index 0000000..c2fcde0
--- /dev/null
@@ -0,0 +1,166 @@
+From 362eca70b53389bddf3143fe20f53dcce2cfdf61 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Tue, 10 Jul 2018 01:07:43 -0400
+Subject: ext4: fix inline data updates with checksums enabled
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 362eca70b53389bddf3143fe20f53dcce2cfdf61 upstream.
+
+The inline data code was updating the raw inode directly; this is
+problematic since if metadata checksums are enabled,
+ext4_mark_inode_dirty() must be called to update the inode's checksum.
+In addition, the jbd2 layer requires that get_write_access() be called
+before the metadata buffer is modified.  Fix both of these problems.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=200443
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inline.c |   19 +++++++++++--------
+ fs/ext4/inode.c  |   16 +++++++---------
+ 2 files changed, 18 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct
+               goto convert;
+       }
++      ret = ext4_journal_get_write_access(handle, iloc.bh);
++      if (ret)
++              goto out;
++
+       flags |= AOP_FLAG_NOFS;
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct
+ out_up_read:
+       up_read(&EXT4_I(inode)->xattr_sem);
+ out:
+-      if (handle)
++      if (handle && (ret != 1))
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+       return ret;
+@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct in
+       ext4_write_unlock_xattr(inode, &no_expand);
+       brelse(iloc.bh);
++      mark_inode_dirty(inode);
+ out:
+       return copied;
+ }
+@@ -898,7 +903,6 @@ retry_journal:
+               goto out;
+       }
+-
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page) {
+               ret = -ENOMEM;
+@@ -916,6 +920,9 @@ retry_journal:
+               if (ret < 0)
+                       goto out_release_page;
+       }
++      ret = ext4_journal_get_write_access(handle, iloc.bh);
++      if (ret)
++              goto out_release_page;
+       up_read(&EXT4_I(inode)->xattr_sem);
+       *pagep = page;
+@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct
+                                 unsigned len, unsigned copied,
+                                 struct page *page)
+ {
+-      int i_size_changed = 0;
+       int ret;
+       ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
+@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+-      if (pos+copied > inode->i_size) {
++      if (pos+copied > inode->i_size)
+               i_size_write(inode, pos+copied);
+-              i_size_changed = 1;
+-      }
+       unlock_page(page);
+       put_page(page);
+@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+-      if (i_size_changed)
+-              mark_inode_dirty(inode);
++      mark_inode_dirty(inode);
+       return copied;
+ }
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *f
+       loff_t old_size = inode->i_size;
+       int ret = 0, ret2;
+       int i_size_changed = 0;
++      int inline_data = ext4_has_inline_data(inode);
+       trace_ext4_write_end(inode, pos, len, copied);
+-      if (ext4_has_inline_data(inode)) {
++      if (inline_data) {
+               ret = ext4_write_inline_data_end(inode, pos, len,
+                                                copied, page);
+               if (ret < 0) {
+@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *f
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+-      if (i_size_changed)
++      if (i_size_changed || inline_data)
+               ext4_mark_inode_dirty(handle, inode);
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(str
+       int partial = 0;
+       unsigned from, to;
+       int size_changed = 0;
++      int inline_data = ext4_has_inline_data(inode);
+       trace_ext4_journalled_write_end(inode, pos, len, copied);
+       from = pos & (PAGE_SIZE - 1);
+@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(str
+       BUG_ON(!ext4_handle_valid(handle));
+-      if (ext4_has_inline_data(inode)) {
++      if (inline_data) {
+               ret = ext4_write_inline_data_end(inode, pos, len,
+                                                copied, page);
+               if (ret < 0) {
+@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(str
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+-      if (size_changed) {
++      if (size_changed || inline_data) {
+               ret2 = ext4_mark_inode_dirty(handle, inode);
+               if (!ret)
+                       ret = ret2;
+@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(s
+       }
+       if (inline_data) {
+-              BUFFER_TRACE(inode_bh, "get write access");
+-              ret = ext4_journal_get_write_access(handle, inode_bh);
+-
+-              err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
+-
++              ret = ext4_mark_inode_dirty(handle, inode);
+       } else {
+               ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                            do_journal_get_write_access);
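
Both halves of the fix enforce an ordering: tell jbd2 about the metadata
buffer via get_write_access() before modifying it, and re-dirty the inode
through the marking helper so its checksum is recomputed. The required
ordering, schematically (all functions below are trivial stand-ins; only
journal_get_write_access() mirrors a real jbd2 call by name):

    struct handle; struct buffer; struct inode;

    static int journal_get_write_access(struct handle *h, struct buffer *b)
    { (void)h; (void)b; return 0; }
    static void write_inline_bytes(struct buffer *b) { (void)b; }
    static int mark_dirty(struct handle *h, struct inode *i)
    { (void)h; (void)i; return 0; }

    int update_inline(struct handle *h, struct buffer *b, struct inode *i)
    {
        int ret = journal_get_write_access(h, b);  /* declare to journal */

        if (ret)
            return ret;
        write_inline_bytes(b);      /* only then modify the buffer */
        return mark_dirty(h, i);    /* recompute the inode checksum */
    }
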
diff --git a/queue-4.17/gpio-of-handle-fixed-regulator-flags-properly.patch b/queue-4.17/gpio-of-handle-fixed-regulator-flags-properly.patch
new file mode 100644 (file)
index 0000000..7836a95
--- /dev/null
@@ -0,0 +1,40 @@
+From 906402a44b5d090e9c330c562b8aa65c80790ccc Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Mon, 11 Jun 2018 15:11:41 +0200
+Subject: gpio: of: Handle fixed regulator flags properly
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 906402a44b5d090e9c330c562b8aa65c80790ccc upstream.
+
+This fixes up the handling of fixed regulator polarity
+inversion flags: while I remembered to fix it for the
+undocumented "reg-fixed-voltage" binding, I forgot about the
+official "regulator-fixed" binding; there are two ways
+to describe a fixed regulator.
+
+The error was noticed and fixed.
+
+Fixes: a603a2b8d86e ("gpio: of: Add special quirk to parse regulator flags")
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Thierry Reding <thierry.reding@gmail.com>
+Reported-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib-of.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct
+        * Note that active low is the default.
+        */
+       if (IS_ENABLED(CONFIG_REGULATOR) &&
+-          (of_device_is_compatible(np, "reg-fixed-voltage") ||
++          (of_device_is_compatible(np, "regulator-fixed") ||
++           of_device_is_compatible(np, "reg-fixed-voltage") ||
+            of_device_is_compatible(np, "regulator-gpio"))) {
+               /*
+                * The regulator GPIO handles are specified such that the
diff --git a/queue-4.17/gpio-uniphier-set-legitimate-irq-trigger-type-in-.to_irq-hook.patch b/queue-4.17/gpio-uniphier-set-legitimate-irq-trigger-type-in-.to_irq-hook.patch
new file mode 100644 (file)
index 0000000..291959d
--- /dev/null
@@ -0,0 +1,51 @@
+From bbfbf04c2d4ef673f657175456f6693c9225748a Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Thu, 14 Jun 2018 14:27:45 +0900
+Subject: gpio: uniphier: set legitimate irq trigger type in .to_irq hook
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit bbfbf04c2d4ef673f657175456f6693c9225748a upstream.
+
+If a GPIO chip is a part of a hierarchy IRQ domain, there is no
+way to specify the trigger type when gpio(d)_to_irq() allocates an
+interrupt on-the-fly.
+
+Currently, uniphier_gpio_to_irq() sets IRQ_TYPE_NONE, but it causes
+an error in the .alloc() hook of the parent domain.
+(drivers/irqchip/irq-uniphier-aidet.c)
+
+Even if we change irq-uniphier-aidet.c to accept the NONE type,
+GIC complains about it since commit 83a86fbb5b56 ("irqchip/gic:
+Loudly complain about the use of IRQ_TYPE_NONE").
+
+Instead, use IRQ_TYPE_LEVEL_HIGH as a temporary value when an irq
+is allocated.  irq_set_irq_type() will override it when the irq is
+really requested.
+
+Fixes: dbe776c2ca54 ("gpio: uniphier: add UniPhier GPIO controller driver")
+Reported-by: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Tested-by: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-uniphier.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-uniphier.c
++++ b/drivers/gpio/gpio-uniphier.c
+@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct g
+       fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
+       fwspec.param_count = 2;
+       fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
+-      fwspec.param[1] = IRQ_TYPE_NONE;
++      /*
++       * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
++       * temporarily. Anyway, ->irq_set_type() will override it later.
++       */
++      fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+       return irq_create_fwspec_mapping(&fwspec);
+ }
diff --git a/queue-4.17/i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch b/queue-4.17/i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch
new file mode 100644 (file)
index 0000000..3809ffa
--- /dev/null
@@ -0,0 +1,140 @@
+From 2b16fd63059ab9a46d473620749672dc342e1d21 Mon Sep 17 00:00:00 2001
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Date: Thu, 28 Jun 2018 22:45:38 +0200
+Subject: i2c: rcar: handle RXDMA HW behaviour on Gen3
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+commit 2b16fd63059ab9a46d473620749672dc342e1d21 upstream.
+
+On Gen3, we can only do RXDMA once per transfer reliably. For that, we
+must reset the device, then we can have RXDMA once. This patch
+implements this. When there is no reset controller or the reset fails,
+RXDMA will be blocked completely. Otherwise, it will be disabled after
+the first RXDMA transfer. Based on a commit from the BSP by Hiromitsu
+Yamasaki, yet completely refactored to handle multiple read messages
+within one transfer.
+
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-rcar.c |   54 +++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 51 insertions(+), 3 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -32,6 +32,7 @@
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/reset.h>
+ #include <linux/slab.h>
+ /* register offsets */
+@@ -111,8 +112,9 @@
+ #define ID_ARBLOST    (1 << 3)
+ #define ID_NACK               (1 << 4)
+ /* persistent flags */
++#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
+ #define ID_P_PM_BLOCKED       (1 << 31)
+-#define ID_P_MASK     ID_P_PM_BLOCKED
++#define ID_P_MASK     (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
+ enum rcar_i2c_type {
+       I2C_RCAR_GEN1,
+@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
+       struct dma_chan *dma_rx;
+       struct scatterlist sg;
+       enum dma_data_direction dma_direction;
++
++      struct reset_control *rstc;
+ };
+ #define rcar_i2c_priv_to_dev(p)               ((p)->adap.dev.parent)
+@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rc
+       dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+                        sg_dma_len(&priv->sg), priv->dma_direction);
++      /* Gen3 can only do one RXDMA per transfer and we just completed it */
++      if (priv->devtype == I2C_RCAR_GEN3 &&
++          priv->dma_direction == DMA_FROM_DEVICE)
++              priv->flags |= ID_P_NO_RXDMA;
++
+       priv->dma_direction = DMA_NONE;
+ }
+@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c
+       unsigned char *buf;
+       int len;
+-      /* Do not use DMA if it's not available or for messages < 8 bytes */
+-      if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
++      /* Do various checks to see if DMA is feasible at all */
++      if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
++          (read && priv->flags & ID_P_NO_RXDMA))
+               return;
+       if (read) {
+@@ -737,6 +747,25 @@ static void rcar_i2c_release_dma(struct
+       }
+ }
++/* I2C is a special case, we need to poll the status of a reset */
++static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
++{
++      int i, ret;
++
++      ret = reset_control_reset(priv->rstc);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < LOOP_TIMEOUT; i++) {
++              ret = reset_control_status(priv->rstc);
++              if (ret == 0)
++                      return 0;
++              udelay(1);
++      }
++
++      return -ETIMEDOUT;
++}
++
+ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+                               struct i2c_msg *msgs,
+                               int num)
+@@ -748,6 +777,16 @@ static int rcar_i2c_master_xfer(struct i
+       pm_runtime_get_sync(dev);
++      /* Gen3 needs a reset before allowing RXDMA once */
++      if (priv->devtype == I2C_RCAR_GEN3) {
++              priv->flags |= ID_P_NO_RXDMA;
++              if (!IS_ERR(priv->rstc)) {
++                      ret = rcar_i2c_do_reset(priv);
++                      if (ret == 0)
++                              priv->flags &= ~ID_P_NO_RXDMA;
++              }
++      }
++
+       rcar_i2c_init(priv);
+       ret = rcar_i2c_bus_barrier(priv);
+@@ -918,6 +957,15 @@ static int rcar_i2c_probe(struct platfor
+       if (ret < 0)
+               goto out_pm_put;
++      if (priv->devtype == I2C_RCAR_GEN3) {
++              priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
++              if (!IS_ERR(priv->rstc)) {
++                      ret = reset_control_status(priv->rstc);
++                      if (ret < 0)
++                              priv->rstc = ERR_PTR(-ENOTSUPP);
++              }
++      }
++
+       /* Stay always active when multi-master to keep arbitration working */
+       if (of_property_read_bool(dev->of_node, "multi-master"))
+               priv->flags |= ID_P_PM_BLOCKED;
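
rcar_i2c_do_reset() is a bounded status poll: trigger the reset, then
busy-wait on its status with a hard iteration cap so a stuck reset degrades
into -ETIMEDOUT (and therefore into "no RXDMA") rather than a hang. The same
shape in standalone C; trigger_reset(), reset_busy() and delay_us() are
hypothetical stand-ins for reset_control_reset(), reset_control_status() and
udelay():

    #include <errno.h>

    #define LOOP_TIMEOUT 128    /* iterations; the bound is illustrative */

    static int trigger_reset(void) { return 0; }
    static int reset_busy(void) { static int n; return n++ < 3; } /* busy 3x */
    static void delay_us(unsigned int us) { (void)us; }

    int do_reset(void)
    {
        int i, ret = trigger_reset();

        if (ret)
            return ret;
        for (i = 0; i < LOOP_TIMEOUT; i++) {
            if (reset_busy() == 0)
                return 0;       /* reset has deasserted */
            delay_us(1);
        }
        return -ETIMEDOUT;
    }
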
diff --git a/queue-4.17/nvmet-fc-fix-target-sgl-list-on-large-transfers.patch b/queue-4.17/nvmet-fc-fix-target-sgl-list-on-large-transfers.patch
new file mode 100644 (file)
index 0000000..19bdab0
--- /dev/null
@@ -0,0 +1,126 @@
+From d082dc1562a2ff0947b214796f12faaa87e816a9 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Mon, 16 Jul 2018 14:38:14 -0700
+Subject: nvmet-fc: fix target sgl list on large transfers
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit d082dc1562a2ff0947b214796f12faaa87e816a9 upstream.
+
+The existing code to carve up the sg list expected one sg element per
+page, which can be very incorrect with IOMMUs remapping multiple memory
+pages to fewer bus addresses. Hitting this error required a large I/O
+payload (greater than 256k) and a system that maps on a per-page basis.
+It's possible that large I/Os could get by fine if the system condensed
+the sgl list into the first 64 elements.
+
+This patch corrects the sg list handling by specifically walking the
+sg list element by element and attempting to divide the transfer up
+on a per-sg element boundary. While doing so, it still tries to keep
+sequences under 256k, but will exceed that rule if a single sg element
+is larger than 256k.
+
+Fixes: 48fa362b6c3f ("nvmet-fc: simplify sg list handling")
+Cc: <stable@vger.kernel.org> # 4.14
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/fc.c |   44 +++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 35 insertions(+), 9 deletions(-)
+
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
+       struct work_struct              work;
+ } __aligned(sizeof(unsigned long long));
++/* desired maximum for a single sequence - if sg list allows it */
+ #define NVMET_FC_MAX_SEQ_LENGTH               (256 * 1024)
+-#define NVMET_FC_MAX_XFR_SGENTS               (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
+ enum nvmet_fcp_datadir {
+       NVMET_FCP_NODATA,
+@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
+       struct nvme_fc_cmd_iu           cmdiubuf;
+       struct nvme_fc_ersp_iu          rspiubuf;
+       dma_addr_t                      rspdma;
++      struct scatterlist              *next_sg;
+       struct scatterlist              *data_sg;
+       int                             data_sg_cnt;
+       u32                             offset;
+@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvme
+       INIT_LIST_HEAD(&newrec->assoc_list);
+       kref_init(&newrec->ref);
+       ida_init(&newrec->assoc_cnt);
+-      newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
+-                                      template->max_sgl_segments);
++      newrec->max_sg_cnt = template->max_sgl_segments;
+       ret = nvmet_fc_alloc_ls_iodlist(newrec);
+       if (ret) {
+@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_f
+                               ((fod->io_dir == NVMET_FCP_WRITE) ?
+                                       DMA_FROM_DEVICE : DMA_TO_DEVICE));
+                               /* note: write from initiator perspective */
++      fod->next_sg = fod->data_sg;
+       return 0;
+@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_
+                               struct nvmet_fc_fcp_iod *fod, u8 op)
+ {
+       struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
++      struct scatterlist *sg = fod->next_sg;
+       unsigned long flags;
+-      u32 tlen;
++      u32 remaininglen = fod->req.transfer_len - fod->offset;
++      u32 tlen = 0;
+       int ret;
+       fcpreq->op = op;
+       fcpreq->offset = fod->offset;
+       fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+-      tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
+-                      (fod->req.transfer_len - fod->offset));
++      /*
++       * for next sequence:
++       *  break at a sg element boundary
++       *  attempt to keep sequence length capped at
++       *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
++       *    be longer if a single sg element is larger
++       *    than that amount. This is done to avoid creating
++       *    a new sg list to use for the tgtport api.
++       */
++      fcpreq->sg = sg;
++      fcpreq->sg_cnt = 0;
++      while (tlen < remaininglen &&
++             fcpreq->sg_cnt < tgtport->max_sg_cnt &&
++             tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
++              fcpreq->sg_cnt++;
++              tlen += sg_dma_len(sg);
++              sg = sg_next(sg);
++      }
++      if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
++              fcpreq->sg_cnt++;
++              tlen += min_t(u32, sg_dma_len(sg), remaininglen);
++              sg = sg_next(sg);
++      }
++      if (tlen < remaininglen)
++              fod->next_sg = sg;
++      else
++              fod->next_sg = NULL;
++
+       fcpreq->transfer_length = tlen;
+       fcpreq->transferred_length = 0;
+       fcpreq->fcp_error = 0;
+       fcpreq->rsplen = 0;
+-      fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+-      fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
+-
+       /*
+        * If the last READDATA request: check if LLDD supports
+        * combined xfr with response.
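
The carving rule is easiest to see with concrete numbers: accumulate whole sg
elements while the running total stays under the 256k cap, and exceed the cap
only when the very first element is itself larger than it. A standalone
rendering with a plain array in place of the scatterlist (the kernel loop
additionally clamps that oversized case to the remaining transfer length):

    #include <stdio.h>

    #define SEQ_CAP (256 * 1024)    /* NVMET_FC_MAX_SEQ_LENGTH */

    int main(void)
    {
        /* dma lengths of a condensed sg list; values are made up */
        unsigned int sg_len[] = { 64 * 1024, 128 * 1024, 512 * 1024 };
        unsigned int nsg = 3, max_sg = 64;
        unsigned int i = 0, cnt = 0, tlen = 0, remaining = 700 * 1024;

        while (tlen < remaining && cnt < max_sg && i < nsg &&
               tlen + sg_len[i] < SEQ_CAP) {
            tlen += sg_len[i++];    /* whole elements only */
            cnt++;
        }
        if (tlen < remaining && cnt == 0 && i < nsg) {
            tlen += sg_len[i++];    /* single oversized element: */
            cnt++;                  /* exceed the cap rather than split */
        }
        /* prints: first sequence: 2 segments, 196608 bytes */
        printf("first sequence: %u segments, %u bytes\n", cnt, tlen);
        return 0;
    }
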
diff --git a/queue-4.17/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch b/queue-4.17/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
new file mode 100644 (file)
index 0000000..1b1f7f6
--- /dev/null
@@ -0,0 +1,67 @@
+From 81e69df38e2911b642ec121dec319fad2a4782f3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 14 Jul 2018 23:55:57 -0400
+Subject: random: mix rdrand with entropy sent in from userspace
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 81e69df38e2911b642ec121dec319fad2a4782f3 upstream.
+
+Fedora has integrated the jitter entropy daemon to work around slow
+boot problems, especially on VM's that don't support virtio-rng:
+
+    https://bugzilla.redhat.com/show_bug.cgi?id=1572944
+
+It's understandable why they did this, but the Jitter entropy daemon
+works fundamentally on the principle: "the CPU microarchitecture is
+**so** complicated and we can't figure it out, so it *must* be
+random".  Yes, it uses statistical tests to "prove" it is secure, but
+AES_ENCRYPT(NSA_KEY, COUNTER++) will also pass statistical tests with
+flying colors.
+
+So if RDRAND is available, mix it into entropy submitted from
+userspace.  It can't hurt, and if you believe the NSA has backdoored
+RDRAND, then they probably have enough details about the Intel
+microarchitecture that they can reverse engineer how the Jitter
+entropy daemon affects the microarchitecture, and attack its output
+stream.  And if RDRAND is in fact an honest DRNG, it will immeasurably
+improve on what the Jitter entropy daemon might produce.
+
+This also provides some protection against someone who is able to read
+or set the entropy seed file.
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1895,14 +1895,22 @@ static int
+ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ {
+       size_t bytes;
+-      __u32 buf[16];
++      __u32 t, buf[16];
+       const char __user *p = buffer;
+       while (count > 0) {
++              int b, i = 0;
++
+               bytes = min(count, sizeof(buf));
+               if (copy_from_user(&buf, p, bytes))
+                       return -EFAULT;
++              for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
++                      if (!arch_get_random_int(&t))
++                              break;
++                      buf[i] ^= t;
++              }
++
+               count -= bytes;
+               p += bytes;
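
The mixing step itself is small enough to isolate: XOR one word of hardware
randomness into each word of the user-supplied buffer before it is written to
the pool, and fall back silently to the user's bytes if the instruction is
unavailable. A standalone sketch; hw_random_u32() is a made-up stand-in for
arch_get_random_int():

    #include <stddef.h>
    #include <stdint.h>
    #include <stdbool.h>

    static bool hw_random_u32(uint32_t *v)
    {
        *v = 0xdeadbeef;    /* pretend the instruction succeeded */
        return true;
    }

    void mix_user_entropy(uint32_t *buf, size_t words)
    {
        uint32_t t;
        size_t i;

        for (i = 0; i < words; i++) {
            if (!hw_random_u32(&t))
                break;      /* no RDRAND: keep the user's bytes as-is */
            buf[i] ^= t;    /* XOR can only add, never remove, entropy */
        }
    }
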
diff --git a/queue-4.17/series b/queue-4.17/series
index 27e2f1b4f913c56d17ca8248c67c2ddf5498587d..af753c2007343aac741fbcba058a6a9280fc5a0f 100644 (file)
@@ -296,3 +296,17 @@ drm-add-dp-psr2-sink-enable-bit.patch
 drm-atomic-helper-drop-plane-fb-references-only-for-drm_atomic_helper_shutdown.patch
 drm-dp-mst-fix-off-by-one-typo-when-dump-payload-table.patch
 drm-amdgpu-avoid-reclaim-while-holding-locks-taken-in-mmu-notifier.patch
+block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch
+blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch
+block-reset-bi_iter.bi_done-after-splitting-bio.patch
+block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch
+nvmet-fc-fix-target-sgl-list-on-large-transfers.patch
+i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch
+random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
+squashfs-be-more-careful-about-metadata-corruption.patch
+ext4-fix-false-negatives-and-false-positives-in-ext4_check_descriptors.patch
+ext4-fix-inline-data-updates-with-checksums-enabled.patch
+ext4-check-for-allocation-block-validity-with-block-group-locked.patch
+ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
+gpio-of-handle-fixed-regulator-flags-properly.patch
+gpio-uniphier-set-legitimate-irq-trigger-type-in-.to_irq-hook.patch
diff --git a/queue-4.17/squashfs-be-more-careful-about-metadata-corruption.patch b/queue-4.17/squashfs-be-more-careful-about-metadata-corruption.patch
new file mode 100644 (file)
index 0000000..9df5503
--- /dev/null
@@ -0,0 +1,96 @@
+From 01cfb7937a9af2abb1136c7e89fbf3fd92952956 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 29 Jul 2018 12:44:46 -0700
+Subject: squashfs: be more careful about metadata corruption
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 01cfb7937a9af2abb1136c7e89fbf3fd92952956 upstream.
+
+Anatoly Trosinenko reports that a corrupted squashfs image can cause a
+kernel oops.  It turns out that squashfs can end up being confused about
+negative fragment lengths.
+
+The regular squashfs_read_data() does check for negative lengths, but
+squashfs_read_metadata() did not, and the fragment size code just
+blindly trusted the on-disk value.  Fix both the fragment parsing and
+the metadata reading code.
+
+Reported-by: Anatoly Trosinenko <anatoly.trosinenko@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Phillip Lougher <phillip@squashfs.org.uk>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/squashfs/cache.c       |    3 +++
+ fs/squashfs/file.c        |    8 ++++++--
+ fs/squashfs/fragment.c    |    4 +---
+ fs/squashfs/squashfs_fs.h |    6 ++++++
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/squashfs/cache.c
++++ b/fs/squashfs/cache.c
+@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_
+       TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
++      if (unlikely(length < 0))
++              return -EIO;
++
+       while (length) {
+               entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+               if (entry->error) {
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -194,7 +194,11 @@ static long long read_indexes(struct sup
+               }
+               for (i = 0; i < blocks; i++) {
+-                      int size = le32_to_cpu(blist[i]);
++                      int size = squashfs_block_size(blist[i]);
++                      if (size < 0) {
++                              err = size;
++                              goto failure;
++                      }
+                       block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+               }
+               n -= blocks;
+@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *
+                       sizeof(size));
+       if (res < 0)
+               return res;
+-      return le32_to_cpu(size);
++      return squashfs_block_size(size);
+ }
+ /* Copy data into page cache  */
+--- a/fs/squashfs/fragment.c
++++ b/fs/squashfs/fragment.c
+@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_bl
+               return size;
+       *fragment_block = le64_to_cpu(fragment_entry.start_block);
+-      size = le32_to_cpu(fragment_entry.size);
+-
+-      return size;
++      return squashfs_block_size(fragment_entry.size);
+ }
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -129,6 +129,12 @@
+ #define SQUASHFS_COMPRESSED_BLOCK(B)  (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
++static inline int squashfs_block_size(__le32 raw)
++{
++      u32 size = le32_to_cpu(raw);
++      return (size >> 25) ? -EIO : size;
++}
++
+ /*
+  * Inode number ops.  Inodes consist of a compressed block number, and an
+  * uncompressed offset within that block
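
The helper works because no legitimate on-disk length reaches bit 25: data
blocks top out at 1 MiB and the compressed-size flag lives in bit 24, so any
value with higher bits set (including ones that would turn negative as an
int) can only be corruption. A userspace check of the helper; the kernel
version additionally byte-swaps the __le32 with le32_to_cpu():

    #include <stdint.h>
    #include <stdio.h>

    #define EIO 5

    static int block_size(uint32_t size)    /* already cpu byte order */
    {
        return (size >> 25) ? -EIO : (int)size;
    }

    int main(void)
    {
        printf("%d\n", block_size(131072));     /* sane length: 131072 */
        printf("%d\n", block_size(0xffffffff)); /* corrupt: -5 (-EIO) */
        return 0;
    }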