--- /dev/null
+From 9362dd1109f87a9d0a798fbc890cb339c171ed35 Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:08 +0200
+Subject: blkdev: __blkdev_direct_IO_simple: fix leak in error case
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit 9362dd1109f87a9d0a798fbc890cb339c171ed35 upstream.
+
+Fixes: 72ecad22d9f1 ("block: support a full bio worth of IO for simplified bdev direct-io")
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/block_dev.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -231,7 +231,7 @@ __blkdev_direct_IO_simple(struct kiocb *
+
+ ret = bio_iov_iter_get_pages(&bio, iter);
+ if (unlikely(ret))
+- return ret;
++ goto out;
+ ret = bio.bi_iter.bi_size;
+
+ if (iov_iter_rw(iter) == READ) {
+@@ -260,12 +260,13 @@ __blkdev_direct_IO_simple(struct kiocb *
+ put_page(bvec->bv_page);
+ }
+
+- if (vecs != inline_vecs)
+- kfree(vecs);
+-
+ if (unlikely(bio.bi_status))
+ ret = blk_status_to_errno(bio.bi_status);
+
++out:
++ if (vecs != inline_vecs)
++ kfree(vecs);
++
+ bio_uninit(&bio);
+
+ return ret;
--- /dev/null
+From b403ea2404889e1227812fa9657667a1deb9c694 Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:07 +0200
+Subject: block: bio_iov_iter_get_pages: fix size of last iovec
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit b403ea2404889e1227812fa9657667a1deb9c694 upstream.
+
+If the last page of the bio is not "full", the length of the last
+vector slot needs to be corrected. This slot has the index
+(bio->bi_vcnt - 1), but only in bio->bi_io_vec. In the "bv" helper
+array, which is shifted by the value of bio->bi_vcnt at function
+invocation, the correct index is (nr_pages - 1).
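+
+A concrete illustration, assuming PAGE_SIZE = 4096: for size = 5000
+and offset = 100, iov_iter_get_pages() pins nr_pages = 2 pages, and
+the last page holds only 5100 - 4096 = 1004 bytes. The correction
+below subtracts nr_pages * PAGE_SIZE - offset - size = 8192 - 100 -
+5000 = 3092 from the last slot's bv_len, leaving 4096 - 3092 = 1004.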
+
+v2: improved readability following suggestions from Ming Lei.
+v3: followed a formatting suggestion from Christoph Hellwig.
+
+Fixes: 2cefe4dbaadf ("block: add bio_iov_iter_get_pages()")
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -881,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page);
+ */
+ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
++ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+ struct page **pages = (struct page **)bv;
+- size_t offset, diff;
++ size_t offset;
+ ssize_t size;
+
+ size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
+ if (unlikely(size <= 0))
+ return size ? size : -EFAULT;
+- nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
++ idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /*
+ * Deep magic below: We need to walk the pinned pages backwards
+@@ -903,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *b
+ bio->bi_iter.bi_size += size;
+ bio->bi_vcnt += nr_pages;
+
+- diff = (nr_pages * PAGE_SIZE - offset) - size;
+- while (nr_pages--) {
+- bv[nr_pages].bv_page = pages[nr_pages];
+- bv[nr_pages].bv_len = PAGE_SIZE;
+- bv[nr_pages].bv_offset = 0;
++ while (idx--) {
++ bv[idx].bv_page = pages[idx];
++ bv[idx].bv_len = PAGE_SIZE;
++ bv[idx].bv_offset = 0;
+ }
+
+ bv[0].bv_offset += offset;
+ bv[0].bv_len -= offset;
+- if (diff)
+- bv[bio->bi_vcnt - 1].bv_len -= diff;
++ bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
+
+ iov_iter_advance(iter, size);
+ return 0;
--- /dev/null
+From 17d51b10d7773e4618bcac64648f30f12d4078fb Mon Sep 17 00:00:00 2001
+From: Martin Wilck <mwilck@suse.com>
+Date: Wed, 25 Jul 2018 23:15:09 +0200
+Subject: block: bio_iov_iter_get_pages: pin more pages for multi-segment IOs
+
+From: Martin Wilck <mwilck@suse.com>
+
+commit 17d51b10d7773e4618bcac64648f30f12d4078fb upstream.
+
+bio_iov_iter_get_pages() currently only adds pages for the next non-zero
+segment from the iov_iter to the bio. That's suboptimal for callers,
+which typically try to pin as many pages as fit into the bio. This patch
+converts the current bio_iov_iter_get_pages() into a static helper, and
+introduces a new helper that allocates as many pages as
+
+ 1) fit into the bio,
+ 2) are present in the iov_iter,
+ 3) and can be pinned by MM.
+
+An error is returned only if zero pages could be pinned. Because of 3), a
+zero return value doesn't necessarily mean all pages have been pinned.
+Callers that have to pin every page in the iov_iter must still call this
+function in a loop (this is currently the case).
+
+This change matters most for __blkdev_direct_IO_simple(), which calls
+bio_iov_iter_get_pages() only once. If it obtains fewer pages than
+requested, it returns a "short write" or "short read", and
+__generic_file_write_iter() falls back to buffered writes, which may
+lead to data corruption.
+
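+A caller that must pin the whole iterator still loops at a higher
+level, roughly like this (an illustrative sketch, not part of this
+patch):
+
+    while (iov_iter_count(iter)) {
+        bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES); /* fresh bio each round */
+        ret = bio_iov_iter_get_pages(bio, iter);    /* pins as much as fits */
+        if (ret)
+            break;
+        /* set up bi_iter.bi_sector etc., then submit_bio(bio) */
+    }
+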
+Fixes: 72ecad22d9f1 ("block: support a full bio worth of IO for simplified bdev direct-io")
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin Wilck <mwilck@suse.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c | 35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -872,14 +872,16 @@ done:
+ EXPORT_SYMBOL(bio_add_page);
+
+ /**
+- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+- * Pins as many pages from *iter and appends them to @bio's bvec array. The
++ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
++ * For multi-segment *iter, this function only adds pages from the
++ * next non-empty segment of the iov iterator.
+ */
+-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+@@ -916,6 +918,33 @@ int bio_iov_iter_get_pages(struct bio *b
+ iov_iter_advance(iter, size);
+ return 0;
+ }
++
++/**
++ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * @bio: bio to add pages to
++ * @iter: iov iterator describing the region to be mapped
++ *
++ * Pins pages from *iter and appends them to @bio's bvec array. The
++ * pages will have to be released using put_page() when done.
++ * The function tries, but does not guarantee, to pin as many pages as
++ * fit into the bio, or are requested in *iter, whichever is smaller.
++ * If MM encounters an error pinning the requested pages, it stops.
++ * Error is returned only if 0 pages could be pinned.
++ */
++int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++{
++ unsigned short orig_vcnt = bio->bi_vcnt;
++
++ do {
++ int ret = __bio_iov_iter_get_pages(bio, iter);
++
++ if (unlikely(ret))
++ return bio->bi_vcnt > orig_vcnt ? 0 : ret;
++
++ } while (iov_iter_count(iter) && !bio_full(bio));
++
++ return 0;
++}
+ EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+
+ struct submit_bio_ret {
--- /dev/null
+From 5151842b9d8732d4cbfa8400b40bff894f501b2f Mon Sep 17 00:00:00 2001
+From: Greg Edwards <gedwards@ddn.com>
+Date: Thu, 26 Jul 2018 14:39:37 -0400
+Subject: block: reset bi_iter.bi_done after splitting bio
+
+From: Greg Edwards <gedwards@ddn.com>
+
+commit 5151842b9d8732d4cbfa8400b40bff894f501b2f upstream.
+
+After the bio has been updated to represent the remaining sectors, reset
+bi_done so bio_rewind_iter() does not rewind further than it should.
+
+This resolves a bio_integrity_process() failure on reads where the
+original request was split.
+
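+An illustration, assuming 512-byte sectors: when an 8-sector read is
+split into two 4-sector halves, bio_advance(bio, 2048) on the
+remaining bio also bumps bi_iter.bi_done to 2048. bio_rewind_iter(),
+which bio_integrity_process() uses to restore the original iterator,
+would then rewind 2048 bytes past the point where the remaining bio
+actually starts. Resetting bi_done to 0 makes the rewind stop there.
+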
+Fixes: 63573e359d05 ("bio-integrity: Restore original iterator on verify stage")
+Signed-off-by: Greg Edwards <gedwards@ddn.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1889,6 +1889,7 @@ struct bio *bio_split(struct bio *bio, i
+ bio_integrity_trim(split);
+
+ bio_advance(bio, split->bi_iter.bi_size);
++ bio->bi_iter.bi_done = 0;
+
+ if (bio_flagged(bio, BIO_TRACE_COMPLETION))
+ bio_set_flag(split, BIO_TRACE_COMPLETION);
--- /dev/null
+From 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 12 Jul 2018 19:08:05 -0400
+Subject: ext4: check for allocation block validity with block group locked
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef upstream.
+
+With commit 044e6e3d74a3: "ext4: don't update checksum of new
+initialized bitmaps" the buffer valid bit will get set without
+actually setting up the checksum for the allocation bitmap, since the
+checksum will get calculated once we actually allocate an inode or
+block.
+
+If we are doing this, then we need to (re-)check the verified bit
+after we take the block group lock. Otherwise, we could race with
+another process reading and verifying the bitmap, which would then
+complain about the checksum being invalid.
+
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1780137
+
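+The race, schematically:
+
+    CPU 1 (initializes bitmap)          CPU 2 (validates bitmap)
+                                        buffer_verified(bh)? -> no
+    ext4_lock_group()
+    init bitmap, set_buffer_verified()  waits for the group lock
+    (checksum intentionally not set)
+    ext4_unlock_group()
+                                        ext4_lock_group()
+                                        checksum verify fails ->
+                                        bogus corruption report
+
+Re-checking buffer_verified() under the group lock closes the window.
+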
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/balloc.c | 3 +++
+ fs/ext4/ialloc.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -379,6 +379,8 @@ static int ext4_validate_block_bitmap(st
+ return -EFSCORRUPTED;
+
+ ext4_lock_group(sb, block_group);
++ if (buffer_verified(bh))
++ goto verified;
+ if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+ desc, bh))) {
+ ext4_unlock_group(sb, block_group);
+@@ -401,6 +403,7 @@ static int ext4_validate_block_bitmap(st
+ return -EFSCORRUPTED;
+ }
+ set_buffer_verified(bh);
++verified:
+ ext4_unlock_group(sb, block_group);
+ return 0;
+ }
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -91,6 +91,8 @@ static int ext4_validate_inode_bitmap(st
+ return -EFSCORRUPTED;
+
+ ext4_lock_group(sb, block_group);
++ if (buffer_verified(bh))
++ goto verified;
+ blk = ext4_inode_bitmap(sb, desc);
+ if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+ EXT4_INODES_PER_GROUP(sb) / 8)) {
+@@ -108,6 +110,7 @@ static int ext4_validate_inode_bitmap(st
+ return -EFSBADCRC;
+ }
+ set_buffer_verified(bh);
++verified:
+ ext4_unlock_group(sb, block_group);
+ return 0;
+ }
--- /dev/null
+From 5012284700775a4e6e3fbe7eac4c543c4874b559 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 28 Jul 2018 08:12:04 -0400
+Subject: ext4: fix check to prevent initializing reserved inodes
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 5012284700775a4e6e3fbe7eac4c543c4874b559 upstream.
+
+Commit 8844618d8aa7: "ext4: only look at the bg_flags field if it is
+valid" will complain if block group zero does not have the
+EXT4_BG_INODE_ZEROED flag set. Unfortunately, this is not correct,
+since a freshly created file system has this flag cleared. It gets
+set almost immediately after the file system is mounted read-write --- but
+the following somewhat unlikely sequence will end up triggering a
+false positive report of a corrupted file system:
+
+ mkfs.ext4 /dev/vdc
+ mount -o ro /dev/vdc /vdc
+ mount -o remount,rw /dev/vdc
+
+Instead, when initializing the inode table for block group zero, test
+to make sure that the itable_unused count is not too large, since that is
+the case that will result in some or all of the reserved inodes
+getting cleared.
+
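+Concretely, with the group 0 check added below: if
+EXT4_INODES_PER_GROUP() is 8192 and EXT4_FIRST_INO() is 11, then
+itable_unused may be at most 8192 - 11 = 8181; anything larger would
+place some of the reserved inodes 1..10 inside the supposedly unused
+tail of the inode table that is about to be zeroed.
+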
+This fixes the failures reported by Eric Whiteney when running
+generic/230 and generic/231 in the nojournal test case.
+
+Fixes: 8844618d8aa7 ("ext4: only look at the bg_flags field if it is valid")
+Reported-by: Eric Whitney <enwlinux@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ialloc.c | 5 ++++-
+ fs/ext4/super.c | 8 +-------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1397,7 +1397,10 @@ int ext4_init_inode_table(struct super_b
+ ext4_itable_unused_count(sb, gdp)),
+ sbi->s_inodes_per_block);
+
+- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
++ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
++ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
++ ext4_itable_unused_count(sb, gdp)) <
++ EXT4_FIRST_INO(sb)))) {
+ ext4_error(sb, "Something is wrong with group %u: "
+ "used itable blocks: %d; "
+ "itable unused count: %u",
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3103,14 +3103,8 @@ static ext4_group_t ext4_has_uninit_itab
+ if (!gdp)
+ continue;
+
+- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+- continue;
+- if (group != 0)
++ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ break;
+- ext4_error(sb, "Inode table for bg 0 marked as "
+- "needing zeroing");
+- if (sb_rdonly(sb))
+- return ngroups;
+ }
+
+ return group;
--- /dev/null
+From 362eca70b53389bddf3143fe20f53dcce2cfdf61 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Tue, 10 Jul 2018 01:07:43 -0400
+Subject: ext4: fix inline data updates with checksums enabled
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 362eca70b53389bddf3143fe20f53dcce2cfdf61 upstream.
+
+The inline data code was updating the raw inode directly; this is
+problematic since if metadata checksums are enabled,
+ext4_mark_inode_dirty() must be called to update the inode's checksum.
+In addition, the jbd2 layer requires that get_write_access() be called
+before the metadata buffer is modified. Fix both of these problems.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=200443
+
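+The ordering jbd2 expects, as a minimal sketch (simplified from the
+changes below):
+
+    ret = ext4_journal_get_write_access(handle, iloc.bh);
+    if (ret)
+        goto out;
+    /* ... only now modify the inline data in the raw inode ... */
+    ext4_mark_inode_dirty(handle, inode); /* also recomputes the checksum */
+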
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inline.c | 19 +++++++++++--------
+ fs/ext4/inode.c | 16 +++++++---------
+ 2 files changed, 18 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -688,6 +688,10 @@ int ext4_try_to_write_inline_data(struct
+ goto convert;
+ }
+
++ ret = ext4_journal_get_write_access(handle, iloc.bh);
++ if (ret)
++ goto out;
++
+ flags |= AOP_FLAG_NOFS;
+
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+@@ -716,7 +720,7 @@ int ext4_try_to_write_inline_data(struct
+ out_up_read:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ out:
+- if (handle)
++ if (handle && (ret != 1))
+ ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return ret;
+@@ -758,6 +762,7 @@ int ext4_write_inline_data_end(struct in
+
+ ext4_write_unlock_xattr(inode, &no_expand);
+ brelse(iloc.bh);
++ mark_inode_dirty(inode);
+ out:
+ return copied;
+ }
+@@ -904,7 +909,6 @@ retry_journal:
+ goto out;
+ }
+
+-
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page) {
+ ret = -ENOMEM;
+@@ -922,6 +926,9 @@ retry_journal:
+ if (ret < 0)
+ goto out_release_page;
+ }
++ ret = ext4_journal_get_write_access(handle, iloc.bh);
++ if (ret)
++ goto out_release_page;
+
+ up_read(&EXT4_I(inode)->xattr_sem);
+ *pagep = page;
+@@ -942,7 +949,6 @@ int ext4_da_write_inline_data_end(struct
+ unsigned len, unsigned copied,
+ struct page *page)
+ {
+- int i_size_changed = 0;
+ int ret;
+
+ ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
+@@ -960,10 +966,8 @@ int ext4_da_write_inline_data_end(struct
+ * But it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+- if (pos+copied > inode->i_size) {
++ if (pos+copied > inode->i_size)
+ i_size_write(inode, pos+copied);
+- i_size_changed = 1;
+- }
+ unlock_page(page);
+ put_page(page);
+
+@@ -973,8 +977,7 @@ int ext4_da_write_inline_data_end(struct
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+- if (i_size_changed)
+- mark_inode_dirty(inode);
++ mark_inode_dirty(inode);
+
+ return copied;
+ }
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1388,9 +1388,10 @@ static int ext4_write_end(struct file *f
+ loff_t old_size = inode->i_size;
+ int ret = 0, ret2;
+ int i_size_changed = 0;
++ int inline_data = ext4_has_inline_data(inode);
+
+ trace_ext4_write_end(inode, pos, len, copied);
+- if (ext4_has_inline_data(inode)) {
++ if (inline_data) {
+ ret = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ if (ret < 0) {
+@@ -1418,7 +1419,7 @@ static int ext4_write_end(struct file *f
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+- if (i_size_changed)
++ if (i_size_changed || inline_data)
+ ext4_mark_inode_dirty(handle, inode);
+
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
+@@ -1492,6 +1493,7 @@ static int ext4_journalled_write_end(str
+ int partial = 0;
+ unsigned from, to;
+ int size_changed = 0;
++ int inline_data = ext4_has_inline_data(inode);
+
+ trace_ext4_journalled_write_end(inode, pos, len, copied);
+ from = pos & (PAGE_SIZE - 1);
+@@ -1499,7 +1501,7 @@ static int ext4_journalled_write_end(str
+
+ BUG_ON(!ext4_handle_valid(handle));
+
+- if (ext4_has_inline_data(inode)) {
++ if (inline_data) {
+ ret = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ if (ret < 0) {
+@@ -1530,7 +1532,7 @@ static int ext4_journalled_write_end(str
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
+
+- if (size_changed) {
++ if (size_changed || inline_data) {
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (!ret)
+ ret = ret2;
+@@ -2027,11 +2029,7 @@ static int __ext4_journalled_writepage(s
+ }
+
+ if (inline_data) {
+- BUFFER_TRACE(inode_bh, "get write access");
+- ret = ext4_journal_get_write_access(handle, inode_bh);
+-
+- err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
+-
++ ret = ext4_mark_inode_dirty(handle, inode);
+ } else {
+ ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ do_journal_get_write_access);
--- /dev/null
+From 5613d31214eb4c5c04cdfce4966bb661c8b43191 Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Wed, 25 Jul 2018 08:35:17 +0200
+Subject: nvmet: fixup crash on NULL device path
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit 5613d31214eb4c5c04cdfce4966bb661c8b43191 upstream.
+
+When writing an empty string into the device_path attribute the kernel
+will crash with
+
+nvmet: failed to open block device (null): (-22)
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+
+This patch sanitizes the error handling for invalid device path settings.
+
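+The new check relies on strcspn() returning the length of the leading
+segment that contains no newline, so (illustrative values):
+
+    strcspn("", "\n")            /* == 0 -> rejected with -EINVAL */
+    strcspn("\n", "\n")          /* == 0 -> rejected with -EINVAL */
+    strcspn("/dev/sda\n", "\n")  /* == 8 -> path is accepted */
+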
+Fixes: a07b4970 ("nvmet: add a generic NVMe target")
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index d3f3b3ec4d1a..ebea1373d1b7 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+ {
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
++ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+ if (ns->enabled)
+ goto out_unlock;
+
+- kfree(ns->device_path);
++ ret = -EINVAL;
++ len = strcspn(page, "\n");
++ if (!len)
++ goto out_unlock;
+
++ kfree(ns->device_path);
+ ret = -ENOMEM;
+- ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
++ ns->device_path = kstrndup(page, len, GFP_KERNEL);
+ if (!ns->device_path)
+ goto out_unlock;
+
--- /dev/null
+From 81e69df38e2911b642ec121dec319fad2a4782f3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 14 Jul 2018 23:55:57 -0400
+Subject: random: mix rdrand with entropy sent in from userspace
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 81e69df38e2911b642ec121dec319fad2a4782f3 upstream.
+
+Fedora has integrated the jitter entropy daemon to work around slow
+boot problems, especially on VMs that don't support virtio-rng:
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=1572944
+
+It's understandable why they did this, but the Jitter entropy daemon
+works fundamentally on the principle: "the CPU microarchitecture is
+**so** complicated and we can't figure it out, so it *must* be
+random". Yes, it uses statistical tests to "prove" it is secure, but
+AES_ENCRYPT(NSA_KEY, COUNTER++) will also pass statistical tests with
+flying colors.
+
+So if RDRAND is available, mix it into entropy submitted from
+userspace. It can't hurt, and if you believe the NSA has backdoored
+RDRAND, then they probably have enough details about the Intel
+microarchitecture that they can reverse engineer how the Jitter
+entropy daemon affects the microarchitecture, and attack its output
+stream. And if RDRAND is in fact an honest DRNG, it will immeasurably
+improve on what the Jitter entropy daemon might produce.
+
+This also provides some protection against someone who is able to read
+or set the entropy seed file.
+
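+The mixing step itself is a per-word XOR, roughly:
+
+    __u32 t;
+    if (arch_get_random_int(&t))  /* false if no RDRAND-like source */
+        buf[i] ^= t;
+
+Because XOR with an independent value cannot make its other operand
+less random, this never reduces the entropy already in the buffer.
+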
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1897,14 +1897,22 @@ static int
+ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ {
+ size_t bytes;
+- __u32 buf[16];
++ __u32 t, buf[16];
+ const char __user *p = buffer;
+
+ while (count > 0) {
++ int b, i = 0;
++
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
+
++ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
++ if (!arch_get_random_int(&t))
++ break;
++ buf[i] ^= t;
++ }
++
+ count -= bytes;
+ p += bytes;
+
drm-add-dp-psr2-sink-enable-bit.patch
drm-atomic-helper-drop-plane-fb-references-only-for-drm_atomic_helper_shutdown.patch
drm-dp-mst-fix-off-by-one-typo-when-dump-payload-table.patch
+block-bio_iov_iter_get_pages-fix-size-of-last-iovec.patch
+blkdev-__blkdev_direct_io_simple-fix-leak-in-error-case.patch
+block-reset-bi_iter.bi_done-after-splitting-bio.patch
+block-bio_iov_iter_get_pages-pin-more-pages-for-multi-segment-ios.patch
+random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
+squashfs-be-more-careful-about-metadata-corruption.patch
+ext4-fix-inline-data-updates-with-checksums-enabled.patch
+ext4-check-for-allocation-block-validity-with-block-group-locked.patch
+ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
+nvmet-fixup-crash-on-null-device-path.patch
--- /dev/null
+From 01cfb7937a9af2abb1136c7e89fbf3fd92952956 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 29 Jul 2018 12:44:46 -0700
+Subject: squashfs: be more careful about metadata corruption
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 01cfb7937a9af2abb1136c7e89fbf3fd92952956 upstream.
+
+Anatoly Trosinenko reports that a corrupted squashfs image can cause a
+kernel oops. It turns out that squashfs can end up being confused about
+negative fragment lengths.
+
+The regular squashfs_read_data() does check for negative lengths, but
+squashfs_read_metadata() did not, and the fragment size code just
+blindly trusted the on-disk value. Fix both the fragment parsing and
+the metadata reading code.
+
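+For example, with the squashfs_block_size() helper added below: a
+corrupted on-disk value of 0xffffffff yields size >> 25 == 127, so it
+is rejected with -EIO, while a legitimate 128K block size of 0x20000
+(possibly with SQUASHFS_COMPRESSED_BIT_BLOCK set at bit 24) passes
+through unchanged.
+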
+Reported-by: Anatoly Trosinenko <anatoly.trosinenko@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Phillip Lougher <phillip@squashfs.org.uk>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/squashfs/cache.c | 3 +++
+ fs/squashfs/file.c | 8 ++++++--
+ fs/squashfs/fragment.c | 4 +---
+ fs/squashfs/squashfs_fs.h | 6 ++++++
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/squashfs/cache.c
++++ b/fs/squashfs/cache.c
+@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_
+
+ TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+
++ if (unlikely(length < 0))
++ return -EIO;
++
+ while (length) {
+ entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+ if (entry->error) {
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -194,7 +194,11 @@ static long long read_indexes(struct sup
+ }
+
+ for (i = 0; i < blocks; i++) {
+- int size = le32_to_cpu(blist[i]);
++ int size = squashfs_block_size(blist[i]);
++ if (size < 0) {
++ err = size;
++ goto failure;
++ }
+ block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+ }
+ n -= blocks;
+@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *
+ sizeof(size));
+ if (res < 0)
+ return res;
+- return le32_to_cpu(size);
++ return squashfs_block_size(size);
+ }
+
+ /* Copy data into page cache */
+--- a/fs/squashfs/fragment.c
++++ b/fs/squashfs/fragment.c
+@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_bl
+ return size;
+
+ *fragment_block = le64_to_cpu(fragment_entry.start_block);
+- size = le32_to_cpu(fragment_entry.size);
+-
+- return size;
++ return squashfs_block_size(fragment_entry.size);
+ }
+
+
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -129,6 +129,12 @@
+
+ #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+
++static inline int squashfs_block_size(__le32 raw)
++{
++ u32 size = le32_to_cpu(raw);
++ return (size >> 25) ? -EIO : size;
++}
++
+ /*
+ * Inode number ops. Inodes consist of a compressed block number, and an
+ * uncompressed offset within that block