From: Greg Kroah-Hartman
Date: Wed, 21 Jun 2023 18:45:24 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v4.14.320~75
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4ed77b0075e4036651d6af832030b8565302f161;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
      nilfs2-reject-devices-with-insufficient-block-count.patch
      tracing-add-tracing_reset_all_online_cpus_unlocked-function.patch
---

diff --git a/queue-5.4/nilfs2-reject-devices-with-insufficient-block-count.patch b/queue-5.4/nilfs2-reject-devices-with-insufficient-block-count.patch
new file mode 100644
index 00000000000..6f3c07caddd
--- /dev/null
+++ b/queue-5.4/nilfs2-reject-devices-with-insufficient-block-count.patch
@@ -0,0 +1,105 @@
+From 92c5d1b860e9581d64baca76779576c0ab0d943d Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi
+Date: Fri, 26 May 2023 11:13:32 +0900
+Subject: nilfs2: reject devices with insufficient block count
+
+From: Ryusuke Konishi
+
+commit 92c5d1b860e9581d64baca76779576c0ab0d943d upstream.
+
+The current sanity check for nilfs2 geometry information lacks checks for
+the number of segments stored in superblocks, so even for device images
+that have been destructively truncated or have an unusually high number of
+segments, the mount operation may succeed.
+
+This causes out-of-bounds block I/O on file system block reads or log
+writes to the segments, the latter in particular causing
+"a_ops->writepages" to repeatedly fail, which in turn causes
+sync_inodes_sb() to hang.
+
+Fix this issue by checking the number of segments stored in the superblock
+and avoiding mounting devices that can cause out-of-bounds accesses. To
+eliminate the possibility of overflow when calculating the number of
+blocks required for the device from the number of segments, this also adds
+a helper function to calculate the upper bound on the number of segments
+and inserts a check using it.
+
+Link: https://lkml.kernel.org/r/20230526021332.3431-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi
+Reported-by: syzbot+7d50f1e54a12ba3aeae2@syzkaller.appspotmail.com
+  Link: https://syzkaller.appspot.com/bug?extid=7d50f1e54a12ba3aeae2
+Tested-by: Ryusuke Konishi
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/nilfs2/the_nilfs.c |   44 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -375,6 +375,18 @@ unsigned long nilfs_nrsvsegs(struct the_
+ 				  100));
+ }
+ 
++/**
++ * nilfs_max_segment_count - calculate the maximum number of segments
++ * @nilfs: nilfs object
++ */
++static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
++{
++	u64 max_count = U64_MAX;
++
++	do_div(max_count, nilfs->ns_blocks_per_segment);
++	return min_t(u64, max_count, ULONG_MAX);
++}
++
+ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+ {
+ 	nilfs->ns_nsegments = nsegs;
+@@ -384,6 +396,8 @@ void nilfs_set_nsegments(struct the_nilf
+ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 				   struct nilfs_super_block *sbp)
+ {
++	u64 nsegments, nblocks;
++
+ 	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+ 		nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ 			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+@@ -430,7 +444,35 @@ static int nilfs_store_disk_layout(struc
+ 		return -EINVAL;
+ 	}
+ 
+-	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
++	nsegments = le64_to_cpu(sbp->s_nsegments);
++	if (nsegments > nilfs_max_segment_count(nilfs)) {
++		nilfs_msg(nilfs->ns_sb, KERN_ERR,
++			  "segment count %llu exceeds upper limit (%llu segments)",
++			  (unsigned long long)nsegments,
++			  (unsigned long long)nilfs_max_segment_count(nilfs));
++		return -EINVAL;
++	}
++
++	nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
++		nilfs->ns_sb->s_blocksize_bits;
++	if (nblocks) {
++		u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
++		/*
++		 * To avoid failing to mount early device images without a
++		 * second superblock, exclude that block count from the
++		 * "min_block_count" calculation.
++		 */
++
++		if (nblocks < min_block_count) {
++			nilfs_msg(nilfs->ns_sb, KERN_ERR,
++				  "total number of segment blocks %llu exceeds device size (%llu blocks)",
++				  (unsigned long long)min_block_count,
++				  (unsigned long long)nblocks);
++			return -EINVAL;
++		}
++	}
++
++	nilfs_set_nsegments(nilfs, nsegments);
+ 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
+ 	return 0;
+ }
diff --git a/queue-5.4/series b/queue-5.4/series
index a9f3c328359..4711d2b2907 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -1,5 +1,7 @@
+nilfs2-reject-devices-with-insufficient-block-count.patch
 x86-purgatory-fail-the-build-if-purgatory.ro-has-mis.patch
 x86-purgatory-remove-pgo-flags.patch
 mm-rewrite-wait_on_page_bit_common-logic.patch
 list-add-list_del_init_careful-to-go-with-list_empty.patch
 epoll-ep_autoremove_wake_function-should-use-list_de.patch
+tracing-add-tracing_reset_all_online_cpus_unlocked-function.patch
diff --git a/queue-5.4/tracing-add-tracing_reset_all_online_cpus_unlocked-function.patch b/queue-5.4/tracing-add-tracing_reset_all_online_cpus_unlocked-function.patch
new file mode 100644
index 00000000000..dae351d8893
--- /dev/null
+++ b/queue-5.4/tracing-add-tracing_reset_all_online_cpus_unlocked-function.patch
@@ -0,0 +1,87 @@
+From e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)"
+Date: Wed, 23 Nov 2022 14:25:57 -0500
+Subject: tracing: Add tracing_reset_all_online_cpus_unlocked() function
+
+From: Steven Rostedt (Google)
+
+commit e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 upstream.
+
+Currently tracing_reset_all_online_cpus() requires trace_types_lock to be
+held. But only one caller of this function actually has that lock held
+before calling it, and the other just takes the lock so that it can call
+it. More users of this function are needed where the lock is not held.
+
+Add a tracing_reset_all_online_cpus_unlocked() function for the one use
+case that calls it with the lock already held, and also add a
+lockdep_assert to make sure it is held when called.
+
+Then have tracing_reset_all_online_cpus() take the lock internally, such
+that callers do not need to worry about taking it.
+ +Link: https://lkml.kernel.org/r/20221123192741.658273220@goodmis.org + +Cc: Masami Hiramatsu +Cc: Andrew Morton +Cc: Zheng Yejian +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Zheng Yejian +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace.c | 11 ++++++++++- + kernel/trace/trace.h | 1 + + kernel/trace/trace_events.c | 2 +- + 3 files changed, 12 insertions(+), 2 deletions(-) + +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1931,10 +1931,12 @@ void tracing_reset_online_cpus(struct tr + } + + /* Must have trace_types_lock held */ +-void tracing_reset_all_online_cpus(void) ++void tracing_reset_all_online_cpus_unlocked(void) + { + struct trace_array *tr; + ++ lockdep_assert_held(&trace_types_lock); ++ + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + if (!tr->clear_trace) + continue; +@@ -1946,6 +1948,13 @@ void tracing_reset_all_online_cpus(void) + } + } + ++void tracing_reset_all_online_cpus(void) ++{ ++ mutex_lock(&trace_types_lock); ++ tracing_reset_all_online_cpus_unlocked(); ++ mutex_unlock(&trace_types_lock); ++} ++ + /* + * The tgid_map array maps from pid to tgid; i.e. the value stored at index i + * is the tgid last observed corresponding to pid=i. +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -677,6 +677,7 @@ int tracing_is_enabled(void); + void tracing_reset_online_cpus(struct trace_buffer *buf); + void tracing_reset_current(int cpu); + void tracing_reset_all_online_cpus(void); ++void tracing_reset_all_online_cpus_unlocked(void); + int tracing_open_generic(struct inode *inode, struct file *filp); + int tracing_open_generic_tr(struct inode *inode, struct file *filp); + bool tracing_is_disabled(void); +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -2440,7 +2440,7 @@ static void trace_module_remove_events(s + * over from this module may be passed to the new module events and + * unexpected results may occur. + */ +- tracing_reset_all_online_cpus(); ++ tracing_reset_all_online_cpus_unlocked(); + } + + static int trace_module_notify(struct notifier_block *self,
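
A note on the overflow guard in the nilfs2 patch above: bounding nsegments by U64_MAX / ns_blocks_per_segment (further clamped to ULONG_MAX) is what makes the later nsegments * ns_blocks_per_segment multiplication safe, and the result is then compared against the device size in blocks. Below is a minimal userspace sketch of those two checks, assuming a hypothetical standalone harness; the names mirror the patch, but this is not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

/* Upper bound on the segment count so that
 * nsegments * blocks_per_segment cannot overflow a 64-bit value,
 * mirroring nilfs_max_segment_count() in the patch above
 * (the kernel version also clamps the result to ULONG_MAX). */
static uint64_t max_segment_count(uint64_t blocks_per_segment)
{
	return UINT64_MAX / blocks_per_segment;
}

/* Returns 0 when the superblock geometry fits the device, -1 otherwise. */
static int check_geometry(uint64_t nsegments, uint64_t blocks_per_segment,
			  uint64_t device_blocks)
{
	if (nsegments > max_segment_count(blocks_per_segment))
		return -1;	/* the product below would overflow */
	if (device_blocks && nsegments * blocks_per_segment > device_blocks)
		return -1;	/* image claims more blocks than the device has */
	return 0;
}

int main(void)
{
	/* Hypothetical truncated image: 1024 segments of 2048 blocks
	 * claimed, but only 4096 blocks actually present. */
	printf("%d\n", check_geometry(1024, 2048, 4096));	/* prints -1 */
	printf("%d\n", check_geometry(2, 2048, 4096));		/* prints 0 */
	return 0;
}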
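
The tracing patch above is an instance of the common locked/unlocked split: the _unlocked() worker requires trace_types_lock to be held by its caller (enforced with lockdep_assert_held()), while the plain wrapper takes the mutex itself so ordinary callers cannot get the locking wrong. A minimal userspace sketch of the same pattern with a pthread mutex follows; the function names echo the patch, but this is an illustrative assumption, not kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;

/* Does the actual work; by contract the caller must already hold
 * types_lock (the kernel version enforces this with lockdep_assert_held()). */
static void reset_all_online_cpus_unlocked(void)
{
	printf("resetting buffers with the lock held by the caller\n");
}

/* Convenience wrapper: takes the lock internally, so callers that do
 * not already hold it do not need to worry about taking it. */
static void reset_all_online_cpus(void)
{
	pthread_mutex_lock(&types_lock);
	reset_all_online_cpus_unlocked();
	pthread_mutex_unlock(&types_lock);
}

int main(void)
{
	/* A caller that already holds the lock (like the module-unload
	 * path in the patch) uses the _unlocked variant directly. */
	pthread_mutex_lock(&types_lock);
	reset_all_online_cpus_unlocked();
	pthread_mutex_unlock(&types_lock);

	/* Everyone else goes through the locking wrapper. */
	reset_all_online_cpus();
	return 0;
}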