git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2018 08:03:29 +0000 (10:03 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2018 08:03:29 +0000 (10:03 +0200)
added patches:
ext4-check-for-allocation-block-validity-with-block-group-locked.patch
ext4-fix-inline-data-updates-with-checksums-enabled.patch
random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
squashfs-be-more-careful-about-metadata-corruption.patch

queue-4.4/ext4-check-for-allocation-block-validity-with-block-group-locked.patch [new file with mode: 0644]
queue-4.4/ext4-fix-inline-data-updates-with-checksums-enabled.patch [new file with mode: 0644]
queue-4.4/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/squashfs-be-more-careful-about-metadata-corruption.patch [new file with mode: 0644]

diff --git a/queue-4.4/ext4-check-for-allocation-block-validity-with-block-group-locked.patch b/queue-4.4/ext4-check-for-allocation-block-validity-with-block-group-locked.patch
new file mode 100644 (file)
index 0000000..e1fe6ae
--- /dev/null
@@ -0,0 +1,69 @@
+From 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 12 Jul 2018 19:08:05 -0400
+Subject: ext4: check for allocation block validity with block group locked
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef upstream.
+
+With commit 044e6e3d74a3: "ext4: don't update checksum of new
+initialized bitmaps" the buffer valid bit will get set without
+actually setting up the checksum for the allocation bitmap, since the
+checksum will get calculated once we actually allocate an inode or
+block.
+
+If we are doing this, then we need to (re-)check the verified bit
+after we take the block group lock.  Otherwise, we could race with
+another process reading and verifying the bitmap, which would then
+complain about the checksum being invalid.
+
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1780137
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/balloc.c |    3 +++
+ fs/ext4/ialloc.c |    3 +++
+ 2 files changed, 6 insertions(+)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -378,6 +378,8 @@ static int ext4_validate_block_bitmap(st
+               return -EFSCORRUPTED;
+       ext4_lock_group(sb, block_group);
++      if (buffer_verified(bh))
++              goto verified;
+       if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+                       desc, bh))) {
+               ext4_unlock_group(sb, block_group);
+@@ -400,6 +402,7 @@ static int ext4_validate_block_bitmap(st
+               return -EFSCORRUPTED;
+       }
+       set_buffer_verified(bh);
++verified:
+       ext4_unlock_group(sb, block_group);
+       return 0;
+ }
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -88,6 +88,8 @@ static int ext4_validate_inode_bitmap(st
+               return -EFSCORRUPTED;
+       ext4_lock_group(sb, block_group);
++      if (buffer_verified(bh))
++              goto verified;
+       blk = ext4_inode_bitmap(sb, desc);
+       if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
+@@ -105,6 +107,7 @@ static int ext4_validate_inode_bitmap(st
+               return -EFSBADCRC;
+       }
+       set_buffer_verified(bh);
++verified:
+       ext4_unlock_group(sb, block_group);
+       return 0;
+ }
diff --git a/queue-4.4/ext4-fix-inline-data-updates-with-checksums-enabled.patch b/queue-4.4/ext4-fix-inline-data-updates-with-checksums-enabled.patch
new file mode 100644 (file)
index 0000000..7cdce4f
--- /dev/null
@@ -0,0 +1,166 @@
+From 362eca70b53389bddf3143fe20f53dcce2cfdf61 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Tue, 10 Jul 2018 01:07:43 -0400
+Subject: ext4: fix inline data updates with checksums enabled
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 362eca70b53389bddf3143fe20f53dcce2cfdf61 upstream.
+
+The inline data code was updating the raw inode directly; this is
+problematic since if metadata checksums are enabled,
+ext4_mark_inode_dirty() must be called to update the inode's checksum.
+In addition, the jbd2 layer requires that get_write_access() be called
+before the metadata buffer is modified.  Fix both of these problems.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=200443
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inline.c |   19 +++++++++++--------
+ fs/ext4/inode.c  |   16 +++++++---------
+ 2 files changed, 18 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -678,6 +678,10 @@ int ext4_try_to_write_inline_data(struct
+               goto convert;
+       }
++      ret = ext4_journal_get_write_access(handle, iloc.bh);
++      if (ret)
++              goto out;
++
+       flags |= AOP_FLAG_NOFS;
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+@@ -706,7 +710,7 @@ int ext4_try_to_write_inline_data(struct
+ out_up_read:
+       up_read(&EXT4_I(inode)->xattr_sem);
+ out:
+-      if (handle)
++      if (handle && (ret != 1))
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+       return ret;
+@@ -748,6 +752,7 @@ int ext4_write_inline_data_end(struct in
+       ext4_write_unlock_xattr(inode, &no_expand);
+       brelse(iloc.bh);
++      mark_inode_dirty(inode);
+ out:
+       return copied;
+ }
+@@ -894,7 +899,6 @@ retry_journal:
+               goto out;
+       }
+-
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page) {
+               ret = -ENOMEM;
+@@ -912,6 +916,9 @@ retry_journal:
+               if (ret < 0)
+                       goto out_release_page;
+       }
++      ret = ext4_journal_get_write_access(handle, iloc.bh);
++      if (ret)
++              goto out_release_page;
+       up_read(&EXT4_I(inode)->xattr_sem);
+       *pagep = page;
+@@ -932,7 +939,6 @@ int ext4_da_write_inline_data_end(struct
+                                 unsigned len, unsigned copied,
+                                 struct page *page)
+ {
+-      int i_size_changed = 0;
+       int ret;
+       ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
+@@ -950,10 +956,8 @@ int ext4_da_write_inline_data_end(struct
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+-      if (pos+copied > inode->i_size) {
++      if (pos+copied > inode->i_size)
+               i_size_write(inode, pos+copied);
+-              i_size_changed = 1;
+-      }
+       unlock_page(page);
+       page_cache_release(page);
+@@ -963,8 +967,7 @@ int ext4_da_write_inline_data_end(struct
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+-      if (i_size_changed)
+-              mark_inode_dirty(inode);
++      mark_inode_dirty(inode);
+       return copied;
+ }
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1164,9 +1164,10 @@ static int ext4_write_end(struct file *f
+       loff_t old_size = inode->i_size;
+       int ret = 0, ret2;
+       int i_size_changed = 0;
++      int inline_data = ext4_has_inline_data(inode);
+       trace_ext4_write_end(inode, pos, len, copied);
+-      if (ext4_has_inline_data(inode)) {
++      if (inline_data) {
+               ret = ext4_write_inline_data_end(inode, pos, len,
+                                                copied, page);
+               if (ret < 0) {
+@@ -1194,7 +1195,7 @@ static int ext4_write_end(struct file *f
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+-      if (i_size_changed)
++      if (i_size_changed || inline_data)
+               ext4_mark_inode_dirty(handle, inode);
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+@@ -1268,6 +1269,7 @@ static int ext4_journalled_write_end(str
+       int partial = 0;
+       unsigned from, to;
+       int size_changed = 0;
++      int inline_data = ext4_has_inline_data(inode);
+       trace_ext4_journalled_write_end(inode, pos, len, copied);
+       from = pos & (PAGE_CACHE_SIZE - 1);
+@@ -1275,7 +1277,7 @@ static int ext4_journalled_write_end(str
+       BUG_ON(!ext4_handle_valid(handle));
+-      if (ext4_has_inline_data(inode)) {
++      if (inline_data) {
+               ret = ext4_write_inline_data_end(inode, pos, len,
+                                                copied, page);
+               if (ret < 0) {
+@@ -1306,7 +1308,7 @@ static int ext4_journalled_write_end(str
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+-      if (size_changed) {
++      if (size_changed || inline_data) {
+               ret2 = ext4_mark_inode_dirty(handle, inode);
+               if (!ret)
+                       ret = ret2;
+@@ -1804,11 +1806,7 @@ static int __ext4_journalled_writepage(s
+       }
+       if (inline_data) {
+-              BUFFER_TRACE(inode_bh, "get write access");
+-              ret = ext4_journal_get_write_access(handle, inode_bh);
+-
+-              err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
+-
++              ret = ext4_mark_inode_dirty(handle, inode);
+       } else {
+               ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                            do_journal_get_write_access);
diff --git a/queue-4.4/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch b/queue-4.4/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
new file mode 100644 (file)
index 0000000..4ba7d18
--- /dev/null
@@ -0,0 +1,67 @@
+From 81e69df38e2911b642ec121dec319fad2a4782f3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 14 Jul 2018 23:55:57 -0400
+Subject: random: mix rdrand with entropy sent in from userspace
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 81e69df38e2911b642ec121dec319fad2a4782f3 upstream.
+
+Fedora has integrated the jitter entropy daemon to work around slow
+boot problems, especially on VM's that don't support virtio-rng:
+
+    https://bugzilla.redhat.com/show_bug.cgi?id=1572944
+
+It's understandable why they did this, but the Jitter entropy daemon
+works fundamentally on the principle: "the CPU microarchitecture is
+**so** complicated and we can't figure it out, so it *must* be
+random".  Yes, it uses statistical tests to "prove" it is secure, but
+AES_ENCRYPT(NSA_KEY, COUNTER++) will also pass statistical tests with
+flying colors.
+
+So if RDRAND is available, mix it into entropy submitted from
+userspace.  It can't hurt, and if you believe the NSA has backdoored
+RDRAND, then they probably have enough details about the Intel
+microarchitecture that they can reverse engineer how the Jitter
+entropy daemon affects the microarchitecture, and attack its output
+stream.  And if RDRAND is in fact an honest DRNG, it will immeasurably
+improve on what the Jitter entropy daemon might produce.
+
+This also provides some protection against someone who is able to read
+or set the entropy seed file.
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1503,14 +1503,22 @@ static int
+ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ {
+       size_t bytes;
+-      __u32 buf[16];
++      __u32 t, buf[16];
+       const char __user *p = buffer;
+       while (count > 0) {
++              int b, i = 0;
++
+               bytes = min(count, sizeof(buf));
+               if (copy_from_user(&buf, p, bytes))
+                       return -EFAULT;
++              for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
++                      if (!arch_get_random_int(&t))
++                              break;
++                      buf[i] ^= t;
++              }
++
+               count -= bytes;
+               p += bytes;
index 523b75e403da237a06f1649766caa27db7b8763b..1553efacca3d1285f6dfdc4aea3a42dabcba8e58 100644 (file)
@@ -95,3 +95,7 @@ scsi-scsi_dh-replace-too-broad-tp9-string-with-the-exact-models.patch
 scsi-megaraid_sas-increase-timeout-by-1-sec-for-non-raid-fastpath-ios.patch
 media-si470x-fix-__be16-annotations.patch
 drm-add-dp-psr2-sink-enable-bit.patch
+random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
+squashfs-be-more-careful-about-metadata-corruption.patch
+ext4-fix-inline-data-updates-with-checksums-enabled.patch
+ext4-check-for-allocation-block-validity-with-block-group-locked.patch
diff --git a/queue-4.4/squashfs-be-more-careful-about-metadata-corruption.patch b/queue-4.4/squashfs-be-more-careful-about-metadata-corruption.patch
new file mode 100644 (file)
index 0000000..9df5503
--- /dev/null
@@ -0,0 +1,96 @@
+From 01cfb7937a9af2abb1136c7e89fbf3fd92952956 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 29 Jul 2018 12:44:46 -0700
+Subject: squashfs: be more careful about metadata corruption
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 01cfb7937a9af2abb1136c7e89fbf3fd92952956 upstream.
+
+Anatoly Trosinenko reports that a corrupted squashfs image can cause a
+kernel oops.  It turns out that squashfs can end up being confused about
+negative fragment lengths.
+
+The regular squashfs_read_data() does check for negative lengths, but
+squashfs_read_metadata() did not, and the fragment size code just
+blindly trusted the on-disk value.  Fix both the fragment parsing and
+the metadata reading code.
+
+Reported-by: Anatoly Trosinenko <anatoly.trosinenko@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Phillip Lougher <phillip@squashfs.org.uk>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/squashfs/cache.c       |    3 +++
+ fs/squashfs/file.c        |    8 ++++++--
+ fs/squashfs/fragment.c    |    4 +---
+ fs/squashfs/squashfs_fs.h |    6 ++++++
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/squashfs/cache.c
++++ b/fs/squashfs/cache.c
+@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_
+       TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
++      if (unlikely(length < 0))
++              return -EIO;
++
+       while (length) {
+               entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+               if (entry->error) {
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -194,7 +194,11 @@ static long long read_indexes(struct sup
+               }
+               for (i = 0; i < blocks; i++) {
+-                      int size = le32_to_cpu(blist[i]);
++                      int size = squashfs_block_size(blist[i]);
++                      if (size < 0) {
++                              err = size;
++                              goto failure;
++                      }
+                       block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+               }
+               n -= blocks;
+@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *
+                       sizeof(size));
+       if (res < 0)
+               return res;
+-      return le32_to_cpu(size);
++      return squashfs_block_size(size);
+ }
+ /* Copy data into page cache  */
+--- a/fs/squashfs/fragment.c
++++ b/fs/squashfs/fragment.c
+@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_bl
+               return size;
+       *fragment_block = le64_to_cpu(fragment_entry.start_block);
+-      size = le32_to_cpu(fragment_entry.size);
+-
+-      return size;
++      return squashfs_block_size(fragment_entry.size);
+ }
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -129,6 +129,12 @@
+ #define SQUASHFS_COMPRESSED_BLOCK(B)  (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
++static inline int squashfs_block_size(__le32 raw)
++{
++      u32 size = le32_to_cpu(raw);
++      return (size >> 25) ? -EIO : size;
++}
++
+ /*
+  * Inode number ops.  Inodes consist of a compressed block number, and an
+  * uncompressed offset within that block