From a0ba6a3d48340449df201c23820e40dbbc8e224e Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 30 Jul 2018 10:03:45 +0200
Subject: [PATCH] 4.9-stable patches

added patches:
	ext4-check-for-allocation-block-validity-with-block-group-locked.patch
	ext4-fix-inline-data-updates-with-checksums-enabled.patch
	random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
	squashfs-be-more-careful-about-metadata-corruption.patch
---
 ...ock-validity-with-block-group-locked.patch |  69 ++++++++
 ...-data-updates-with-checksums-enabled.patch | 166 ++++++++++++++++++
 ...-with-entropy-sent-in-from-userspace.patch |  67 +++++++
 queue-4.9/series                              |   4 +
 ...re-careful-about-metadata-corruption.patch |  96 ++++++++++
 5 files changed, 402 insertions(+)
 create mode 100644 queue-4.9/ext4-check-for-allocation-block-validity-with-block-group-locked.patch
 create mode 100644 queue-4.9/ext4-fix-inline-data-updates-with-checksums-enabled.patch
 create mode 100644 queue-4.9/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
 create mode 100644 queue-4.9/squashfs-be-more-careful-about-metadata-corruption.patch

diff --git a/queue-4.9/ext4-check-for-allocation-block-validity-with-block-group-locked.patch b/queue-4.9/ext4-check-for-allocation-block-validity-with-block-group-locked.patch
new file mode 100644
index 00000000000..e1fe6ae762d
--- /dev/null
+++ b/queue-4.9/ext4-check-for-allocation-block-validity-with-block-group-locked.patch
@@ -0,0 +1,69 @@
+From 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o
+Date: Thu, 12 Jul 2018 19:08:05 -0400
+Subject: ext4: check for allocation block validity with block group locked
+
+From: Theodore Ts'o
+
+commit 8d5a803c6a6ce4ec258e31f76059ea5153ba46ef upstream.
+
+With commit 044e6e3d74a3: "ext4: don't update checksum of new
+initialized bitmaps" the buffer valid bit will get set without
+actually setting up the checksum for the allocation bitmap, since the
+checksum will get calculated once we actually allocate an inode or
+block.
+
+If we are doing this, then we need to (re-)check the verified bit
+after we take the block group lock. Otherwise, we could race with
+another process reading and verifying the bitmap, which would then
+complain about the checksum being invalid.
+
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1780137
+
+Signed-off-by: Theodore Ts'o
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/balloc.c |    3 +++
+ fs/ext4/ialloc.c |    3 +++
+ 2 files changed, 6 insertions(+)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -378,6 +378,8 @@ static int ext4_validate_block_bitmap(st
+ 		return -EFSCORRUPTED;
+ 
+ 	ext4_lock_group(sb, block_group);
++	if (buffer_verified(bh))
++		goto verified;
+ 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+ 			desc, bh))) {
+ 		ext4_unlock_group(sb, block_group);
+@@ -400,6 +402,7 @@ static int ext4_validate_block_bitmap(st
+ 		return -EFSCORRUPTED;
+ 	}
+ 	set_buffer_verified(bh);
++verified:
+ 	ext4_unlock_group(sb, block_group);
+ 	return 0;
+ }
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -88,6 +88,8 @@ static int ext4_validate_inode_bitmap(st
+ 		return -EFSCORRUPTED;
+ 
+ 	ext4_lock_group(sb, block_group);
++	if (buffer_verified(bh))
++		goto verified;
+ 	blk = ext4_inode_bitmap(sb, desc);
+ 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+ 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
+@@ -105,6 +107,7 @@ static int ext4_validate_inode_bitmap(st
+ 		return -EFSBADCRC;
+ 	}
+ 	set_buffer_verified(bh);
++verified:
+ 	ext4_unlock_group(sb, block_group);
+ 	return 0;
+ }
diff --git a/queue-4.9/ext4-fix-inline-data-updates-with-checksums-enabled.patch b/queue-4.9/ext4-fix-inline-data-updates-with-checksums-enabled.patch
new file mode 100644
index 00000000000..e9e19059fd6
--- /dev/null
+++ b/queue-4.9/ext4-fix-inline-data-updates-with-checksums-enabled.patch
@@ -0,0 +1,166 @@
+From 362eca70b53389bddf3143fe20f53dcce2cfdf61 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o
+Date: Tue, 10 Jul 2018 01:07:43 -0400
+Subject: ext4: fix inline data updates with checksums enabled
+
+From: Theodore Ts'o
+
+commit 362eca70b53389bddf3143fe20f53dcce2cfdf61 upstream.
+
+The inline data code was updating the raw inode directly; this is
+problematic since if metadata checksums are enabled,
+ext4_mark_inode_dirty() must be called to update the inode's checksum.
+In addition, the jbd2 layer requires that get_write_access() be called
+before the metadata buffer is modified. Fix both of these problems.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=200443
+
+Signed-off-by: Theodore Ts'o
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/inline.c |   19 +++++++++++--------
+ fs/ext4/inode.c  |   16 +++++++---------
+ 2 files changed, 18 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -679,6 +679,10 @@ int ext4_try_to_write_inline_data(struct
+ 		goto convert;
+ 	}
+ 
++	ret = ext4_journal_get_write_access(handle, iloc.bh);
++	if (ret)
++		goto out;
++
+ 	flags |= AOP_FLAG_NOFS;
+ 
+ 	page = grab_cache_page_write_begin(mapping, 0, flags);
+@@ -707,7 +711,7 @@ int ext4_try_to_write_inline_data(struct
+ out_up_read:
+ 	up_read(&EXT4_I(inode)->xattr_sem);
+ out:
+-	if (handle)
++	if (handle && (ret != 1))
+ 		ext4_journal_stop(handle);
+ 	brelse(iloc.bh);
+ 	return ret;
+@@ -749,6 +753,7 @@ int ext4_write_inline_data_end(struct in
+ 
+ 	ext4_write_unlock_xattr(inode, &no_expand);
+ 	brelse(iloc.bh);
++	mark_inode_dirty(inode);
+ out:
+ 	return copied;
+ }
+@@ -895,7 +900,6 @@ retry_journal:
+ 		goto out;
+ 	}
+ 
+-
+ 	page = grab_cache_page_write_begin(mapping, 0, flags);
+ 	if (!page) {
+ 		ret = -ENOMEM;
+@@ -913,6 +917,9 @@ retry_journal:
+ 		if (ret < 0)
+ 			goto out_release_page;
+ 	}
++	ret = ext4_journal_get_write_access(handle, iloc.bh);
++	if (ret)
++		goto out_release_page;
+ 
+ 	up_read(&EXT4_I(inode)->xattr_sem);
+ 	*pagep = page;
+@@ -933,7 +940,6 @@ int ext4_da_write_inline_data_end(struct
+ 			  unsigned len, unsigned copied,
+ 			  struct page *page)
+ {
+-	int i_size_changed = 0;
+ 	int ret;
+ 
+ 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
+@@ -951,10 +957,8 @@ int ext4_da_write_inline_data_end(struct
+ 	 * But it's important to update i_size while still holding page lock:
+ 	 * page writeout could otherwise come in and zero beyond i_size.
+ 	 */
+-	if (pos+copied > inode->i_size) {
++	if (pos+copied > inode->i_size)
+ 		i_size_write(inode, pos+copied);
+-		i_size_changed = 1;
+-	}
+ 	unlock_page(page);
+ 	put_page(page);
+ 
+@@ -964,8 +968,7 @@ int ext4_da_write_inline_data_end(struct
+ 	 * ordering of page lock and transaction start for journaling
+ 	 * filesystems.
+ 	 */
+-	if (i_size_changed)
+-		mark_inode_dirty(inode);
++	mark_inode_dirty(inode);
+ 
+ 	return copied;
+ }
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1318,9 +1318,10 @@ static int ext4_write_end(struct file *f
+ 	loff_t old_size = inode->i_size;
+ 	int ret = 0, ret2;
+ 	int i_size_changed = 0;
++	int inline_data = ext4_has_inline_data(inode);
+ 
+ 	trace_ext4_write_end(inode, pos, len, copied);
+-	if (ext4_has_inline_data(inode)) {
++	if (inline_data) {
+ 		ret = ext4_write_inline_data_end(inode, pos, len,
+ 						 copied, page);
+ 		if (ret < 0) {
+@@ -1348,7 +1349,7 @@ static int ext4_write_end(struct file *f
+ 	 * ordering of page lock and transaction start for journaling
+ 	 * filesystems.
+ 	 */
+-	if (i_size_changed)
++	if (i_size_changed || inline_data)
+ 		ext4_mark_inode_dirty(handle, inode);
+ 
+ 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
+@@ -1422,6 +1423,7 @@ static int ext4_journalled_write_end(str
+ 	int partial = 0;
+ 	unsigned from, to;
+ 	int size_changed = 0;
++	int inline_data = ext4_has_inline_data(inode);
+ 
+ 	trace_ext4_journalled_write_end(inode, pos, len, copied);
+ 	from = pos & (PAGE_SIZE - 1);
+@@ -1429,7 +1431,7 @@ static int ext4_journalled_write_end(str
+ 
+ 	BUG_ON(!ext4_handle_valid(handle));
+ 
+-	if (ext4_has_inline_data(inode)) {
++	if (inline_data) {
+ 		ret = ext4_write_inline_data_end(inode, pos, len,
+ 						 copied, page);
+ 		if (ret < 0) {
+@@ -1460,7 +1462,7 @@ static int ext4_journalled_write_end(str
+ 	if (old_size < pos)
+ 		pagecache_isize_extended(inode, old_size, pos);
+ 
+-	if (size_changed) {
++	if (size_changed || inline_data) {
+ 		ret2 = ext4_mark_inode_dirty(handle, inode);
+ 		if (!ret)
+ 			ret = ret2;
+@@ -1958,11 +1960,7 @@ static int __ext4_journalled_writepage(s
+ 	}
+ 
+ 	if (inline_data) {
+-		BUFFER_TRACE(inode_bh, "get write access");
+-		ret = ext4_journal_get_write_access(handle, inode_bh);
+-
+-		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
+-
++		ret = ext4_mark_inode_dirty(handle, inode);
+ 	} else {
+ 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ 					     do_journal_get_write_access);
diff --git a/queue-4.9/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch b/queue-4.9/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
new file mode 100644
index 00000000000..7f8255708cb
--- /dev/null
+++ b/queue-4.9/random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
@@ -0,0 +1,67 @@
+From 81e69df38e2911b642ec121dec319fad2a4782f3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o
+Date: Sat, 14 Jul 2018 23:55:57 -0400
+Subject: random: mix rdrand with entropy sent in from userspace
+
+From: Theodore Ts'o
+
+commit 81e69df38e2911b642ec121dec319fad2a4782f3 upstream.
+
+Fedora has integrated the jitter entropy daemon to work around slow
+boot problems, especially on VM's that don't support virtio-rng:
+
+    https://bugzilla.redhat.com/show_bug.cgi?id=1572944
+
+It's understandable why they did this, but the Jitter entropy daemon
+works fundamentally on the principle: "the CPU microarchitecture is
+**so** complicated and we can't figure it out, so it *must* be
+random". Yes, it uses statistical tests to "prove" it is secure, but
+AES_ENCRYPT(NSA_KEY, COUNTER++) will also pass statistical tests with
+flying colors.
+
+So if RDRAND is available, mix it into entropy submitted from
+userspace. It can't hurt, and if you believe the NSA has backdoored
+RDRAND, then they probably have enough details about the Intel
+microarchitecture that they can reverse engineer how the Jitter
+entropy daemon affects the microarchitecture, and attack its output
+stream. And if RDRAND is in fact an honest DRNG, it will immeasurably
+improve on what the Jitter entropy daemon might produce.
+
+This also provides some protection against someone who is able to read
+or set the entropy seed file.
+
+Signed-off-by: Theodore Ts'o
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/char/random.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1826,14 +1826,22 @@ static int
+ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ {
+ 	size_t bytes;
+-	__u32 buf[16];
++	__u32 t, buf[16];
+ 	const char __user *p = buffer;
+ 
+ 	while (count > 0) {
++		int b, i = 0;
++
+ 		bytes = min(count, sizeof(buf));
+ 		if (copy_from_user(&buf, p, bytes))
+ 			return -EFAULT;
+ 
++		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
++			if (!arch_get_random_int(&t))
++				break;
++			buf[i] ^= t;
++		}
++
+ 		count -= bytes;
+ 		p += bytes;
+ 
diff --git a/queue-4.9/series b/queue-4.9/series
index 59e938dba8b..b71c0096310 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -134,3 +134,7 @@ scsi-scsi_dh-replace-too-broad-tp9-string-with-the-exact-models.patch
 scsi-megaraid_sas-increase-timeout-by-1-sec-for-non-raid-fastpath-ios.patch
 media-si470x-fix-__be16-annotations.patch
 drm-add-dp-psr2-sink-enable-bit.patch
+random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
+squashfs-be-more-careful-about-metadata-corruption.patch
+ext4-fix-inline-data-updates-with-checksums-enabled.patch
+ext4-check-for-allocation-block-validity-with-block-group-locked.patch
diff --git a/queue-4.9/squashfs-be-more-careful-about-metadata-corruption.patch b/queue-4.9/squashfs-be-more-careful-about-metadata-corruption.patch
new file mode 100644
index 00000000000..9df5503fd29
--- /dev/null
+++ b/queue-4.9/squashfs-be-more-careful-about-metadata-corruption.patch
@@ -0,0 +1,96 @@
+From 01cfb7937a9af2abb1136c7e89fbf3fd92952956 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Sun, 29 Jul 2018 12:44:46 -0700
+Subject: squashfs: be more careful about metadata corruption
+
+From: Linus Torvalds
+
+commit 01cfb7937a9af2abb1136c7e89fbf3fd92952956 upstream.
+
+Anatoly Trosinenko reports that a corrupted squashfs image can cause a
+kernel oops. It turns out that squashfs can end up being confused about
+negative fragment lengths.
+
+The regular squashfs_read_data() does check for negative lengths, but
+squashfs_read_metadata() did not, and the fragment size code just
+blindly trusted the on-disk value. Fix both the fragment parsing and
+the metadata reading code.
+
+Reported-by: Anatoly Trosinenko
+Cc: Al Viro
+Cc: Phillip Lougher
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/squashfs/cache.c       |    3 +++
+ fs/squashfs/file.c        |    8 ++++++--
+ fs/squashfs/fragment.c    |    4 +---
+ fs/squashfs/squashfs_fs.h |    6 ++++++
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/squashfs/cache.c
++++ b/fs/squashfs/cache.c
+@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_
+ 
+ 	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+ 
++	if (unlikely(length < 0))
++		return -EIO;
++
+ 	while (length) {
+ 		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+ 		if (entry->error) {
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -194,7 +194,11 @@ static long long read_indexes(struct sup
+ 		}
+ 
+ 		for (i = 0; i < blocks; i++) {
+-			int size = le32_to_cpu(blist[i]);
++			int size = squashfs_block_size(blist[i]);
++			if (size < 0) {
++				err = size;
++				goto failure;
++			}
+ 			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+ 		}
+ 		n -= blocks;
+@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *
+ 							sizeof(size));
+ 	if (res < 0)
+ 		return res;
+-	return le32_to_cpu(size);
++	return squashfs_block_size(size);
+ }
+ 
+ /* Copy data into page cache */
+--- a/fs/squashfs/fragment.c
++++ b/fs/squashfs/fragment.c
+@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_bl
+ 		return size;
+ 
+ 	*fragment_block = le64_to_cpu(fragment_entry.start_block);
+-	size = le32_to_cpu(fragment_entry.size);
+-
+-	return size;
++	return squashfs_block_size(fragment_entry.size);
+ }
+ 
+ 
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -129,6 +129,12 @@
+ 
+ #define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+ 
++static inline int squashfs_block_size(__le32 raw)
++{
++	u32 size = le32_to_cpu(raw);
++	return (size >> 25) ? -EIO : size;
++}
++
+ /*
+  * Inode number ops. Inodes consist of a compressed block number, and an
+  * uncompressed offset within that block
-- 
2.47.3