From: Greg Kroah-Hartman Date: Thu, 13 Mar 2025 16:15:41 +0000 (+0100) Subject: 6.1-stable patches X-Git-Tag: v6.6.84~59 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=100a31e98076cb5f5ba13dc26da9db5b45f86271;p=thirdparty%2Fkernel%2Fstable-queue.git 6.1-stable patches added patches: fs-ntfs3-fix-shift-out-of-bounds-in-ntfs_fill_super.patch hrtimer-use-and-report-correct-timerslack-values-for-realtime-tasks.patch --- diff --git a/queue-6.1/fs-ntfs3-fix-shift-out-of-bounds-in-ntfs_fill_super.patch b/queue-6.1/fs-ntfs3-fix-shift-out-of-bounds-in-ntfs_fill_super.patch new file mode 100644 index 0000000000..982b4d37db --- /dev/null +++ b/queue-6.1/fs-ntfs3-fix-shift-out-of-bounds-in-ntfs_fill_super.patch @@ -0,0 +1,160 @@ +From 91a4b1ee78cb100b19b70f077c247f211110348f Mon Sep 17 00:00:00 2001 +From: Konstantin Komarov +Date: Fri, 30 Jun 2023 16:25:25 +0400 +Subject: fs/ntfs3: Fix shift-out-of-bounds in ntfs_fill_super + +From: Konstantin Komarov + +commit 91a4b1ee78cb100b19b70f077c247f211110348f upstream. + +Reported-by: syzbot+478c1bf0e6bf4a8f3a04@syzkaller.appspotmail.com +Signed-off-by: Konstantin Komarov +Signed-off-by: Miguel Garcia Roman +Signed-off-by: Greg Kroah-Hartman +--- + fs/ntfs3/ntfs_fs.h | 2 + + fs/ntfs3/super.c | 68 +++++++++++++++++++++++++++++++++++------------------ + 2 files changed, 48 insertions(+), 22 deletions(-) + +--- a/fs/ntfs3/ntfs_fs.h ++++ b/fs/ntfs3/ntfs_fs.h +@@ -42,9 +42,11 @@ enum utf16_endian; + #define MINUS_ONE_T ((size_t)(-1)) + /* Biggest MFT / smallest cluster */ + #define MAXIMUM_BYTES_PER_MFT 4096 ++#define MAXIMUM_SHIFT_BYTES_PER_MFT 12 + #define NTFS_BLOCKS_PER_MFT_RECORD (MAXIMUM_BYTES_PER_MFT / 512) + + #define MAXIMUM_BYTES_PER_INDEX 4096 ++#define MAXIMUM_SHIFT_BYTES_PER_INDEX 12 + #define NTFS_BLOCKS_PER_INODE (MAXIMUM_BYTES_PER_INDEX / 512) + + /* NTFS specific error code when fixup failed. */ +--- a/fs/ntfs3/super.c ++++ b/fs/ntfs3/super.c +@@ -680,7 +680,7 @@ static u32 true_sectors_per_clst(const s + * ntfs_init_from_boot - Init internal info from on-disk boot sector. + */ + static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, +- u64 dev_size) ++ u64 dev_size) + { + struct ntfs_sb_info *sbi = sb->s_fs_info; + int err; +@@ -705,12 +705,12 @@ static int ntfs_init_from_boot(struct su + + /* 0x55AA is not mandaroty. Thanks Maxim Suhanov*/ + /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1]) +- * goto out; ++ * goto out; + */ + + boot_sector_size = (u32)boot->bytes_per_sector[1] << 8; + if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE || +- !is_power_of_2(boot_sector_size)) { ++ !is_power_of_2(boot_sector_size)) { + goto out; + } + +@@ -733,15 +733,49 @@ static int ntfs_init_from_boot(struct su + + /* Check MFT record size. 
*/ + if ((boot->record_size < 0 && +- SECTOR_SIZE > (2U << (-boot->record_size))) || +- (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) { ++ SECTOR_SIZE > (2U << (-boot->record_size))) || ++ (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) { ++ goto out; ++ } ++ ++ /* Calculate cluster size */ ++ sbi->cluster_size = boot_sector_size * sct_per_clst; ++ sbi->cluster_bits = blksize_bits(sbi->cluster_size); ++ ++ if (boot->record_size >= 0) { ++ record_size = (u32)boot->record_size << sbi->cluster_bits; ++ } else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) { ++ record_size = 1u << (-boot->record_size); ++ } else { ++ ntfs_err(sb, "%s: invalid record size %d.", "NTFS", ++ boot->record_size); ++ goto out; ++ } ++ ++ sbi->record_size = record_size; ++ sbi->record_bits = blksize_bits(record_size); ++ sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes ++ ++ if (record_size > MAXIMUM_BYTES_PER_MFT) { ++ ntfs_err(sb, "Unsupported bytes per MFT record %u.", ++ record_size); ++ goto out; ++ } ++ ++ if (boot->index_size >= 0) { ++ sbi->index_size = (u32)boot->index_size << sbi->cluster_bits; ++ } else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) { ++ sbi->index_size = 1u << (-boot->index_size); ++ } else { ++ ntfs_err(sb, "%s: invalid index size %d.", "NTFS", ++ boot->index_size); + goto out; + } + + /* Check index record size. */ + if ((boot->index_size < 0 && +- SECTOR_SIZE > (2U << (-boot->index_size))) || +- (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) { ++ SECTOR_SIZE > (2U << (-boot->index_size))) || ++ (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) { + goto out; + } + +@@ -762,9 +796,6 @@ static int ntfs_init_from_boot(struct su + dev_size += sector_size - 1; + } + +- sbi->cluster_size = boot_sector_size * sct_per_clst; +- sbi->cluster_bits = blksize_bits(sbi->cluster_size); +- + sbi->mft.lbo = mlcn << sbi->cluster_bits; + sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits; + +@@ -785,9 +816,9 @@ static int ntfs_init_from_boot(struct su + sbi->cluster_mask = sbi->cluster_size - 1; + sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask; + sbi->record_size = record_size = boot->record_size < 0 +- ? 1 << (-boot->record_size) +- : (u32)boot->record_size +- << sbi->cluster_bits; ++ ? 1 << (-boot->record_size) ++ : (u32)boot->record_size ++ << sbi->cluster_bits; + + if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE) + goto out; +@@ -801,8 +832,8 @@ static int ntfs_init_from_boot(struct su + ALIGN(sizeof(enum ATTR_TYPE), 8); + + sbi->index_size = boot->index_size < 0 +- ? 1u << (-boot->index_size) +- : (u32)boot->index_size << sbi->cluster_bits; ++ ? 1u << (-boot->index_size) ++ : (u32)boot->index_size << sbi->cluster_bits; + + sbi->volume.ser_num = le64_to_cpu(boot->serial_num); + +@@ -871,13 +902,6 @@ static int ntfs_init_from_boot(struct su + sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits; + #endif + +- /* +- * Compute the MFT zone at two steps. +- * It would be nice if we are able to allocate 1/8 of +- * total clusters for MFT but not more then 512 MB. 
+- */ +- sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3); +- + err = 0; + + out: diff --git a/queue-6.1/hrtimer-use-and-report-correct-timerslack-values-for-realtime-tasks.patch b/queue-6.1/hrtimer-use-and-report-correct-timerslack-values-for-realtime-tasks.patch new file mode 100644 index 0000000000..f682f374c9 --- /dev/null +++ b/queue-6.1/hrtimer-use-and-report-correct-timerslack-values-for-realtime-tasks.patch @@ -0,0 +1,167 @@ +From ed4fb6d7ef68111bb539283561953e5c6e9a6e38 Mon Sep 17 00:00:00 2001 +From: Felix Moessbauer +Date: Wed, 14 Aug 2024 14:10:32 +0200 +Subject: hrtimer: Use and report correct timerslack values for realtime tasks + +From: Felix Moessbauer + +commit ed4fb6d7ef68111bb539283561953e5c6e9a6e38 upstream. + +The timerslack_ns setting is used to specify how much the hardware +timers should be delayed, to potentially dispatch multiple timers in a +single interrupt. This is a performance optimization. Timers of +realtime tasks (having a realtime scheduling policy) should not be +delayed. + +This logic was inconsitently applied to the hrtimers, leading to delays +of realtime tasks which used timed waits for events (e.g. condition +variables). Due to the downstream override of the slack for rt tasks, +the procfs reported incorrect (non-zero) timerslack_ns values. + +This is changed by setting the timer_slack_ns task attribute to 0 for +all tasks with a rt policy. By that, downstream users do not need to +specially handle rt tasks (w.r.t. the slack), and the procfs entry +shows the correct value of "0". Setting non-zero slack values (either +via procfs or PR_SET_TIMERSLACK) on tasks with a rt policy is ignored, +as stated in "man 2 PR_SET_TIMERSLACK": + + Timer slack is not applied to threads that are scheduled under a + real-time scheduling policy (see sched_setscheduler(2)). + +The special handling of timerslack on rt tasks in downstream users +is removed as well. + +Signed-off-by: Felix Moessbauer +Signed-off-by: Thomas Gleixner +Link: https://lore.kernel.org/all/20240814121032.368444-2-felix.moessbauer@siemens.com +Signed-off-by: Greg Kroah-Hartman +--- + fs/proc/base.c | 9 +++++---- + fs/select.c | 11 ++++------- + kernel/sched/core.c | 8 ++++++++ + kernel/sys.c | 2 ++ + kernel/time/hrtimer.c | 18 +++--------------- + 5 files changed, 22 insertions(+), 26 deletions(-) + +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -2633,10 +2633,11 @@ static ssize_t timerslack_ns_write(struc + } + + task_lock(p); +- if (slack_ns == 0) +- p->timer_slack_ns = p->default_timer_slack_ns; +- else +- p->timer_slack_ns = slack_ns; ++ if (task_is_realtime(p)) ++ slack_ns = 0; ++ else if (slack_ns == 0) ++ slack_ns = p->default_timer_slack_ns; ++ p->timer_slack_ns = slack_ns; + task_unlock(p); + + out: +--- a/fs/select.c ++++ b/fs/select.c +@@ -77,19 +77,16 @@ u64 select_estimate_accuracy(struct time + { + u64 ret; + struct timespec64 now; ++ u64 slack = current->timer_slack_ns; + +- /* +- * Realtime tasks get a slack of 0 for obvious reasons. 
+- */ +- +- if (rt_task(current)) ++ if (slack == 0) + return 0; + + ktime_get_ts64(&now); + now = timespec64_sub(*tv, now); + ret = __estimate_accuracy(&now); +- if (ret < current->timer_slack_ns) +- return current->timer_slack_ns; ++ if (ret < slack) ++ return slack; + return ret; + } + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7380,6 +7380,14 @@ static void __setscheduler_params(struct + else if (fair_policy(policy)) + p->static_prio = NICE_TO_PRIO(attr->sched_nice); + ++ /* rt-policy tasks do not have a timerslack */ ++ if (task_is_realtime(p)) { ++ p->timer_slack_ns = 0; ++ } else if (p->timer_slack_ns == 0) { ++ /* when switching back to non-rt policy, restore timerslack */ ++ p->timer_slack_ns = p->default_timer_slack_ns; ++ } ++ + /* + * __sched_setscheduler() ensures attr->sched_priority == 0 when + * !rt_policy. Always setting this ensures that things like +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -2477,6 +2477,8 @@ SYSCALL_DEFINE5(prctl, int, option, unsi + error = current->timer_slack_ns; + break; + case PR_SET_TIMERSLACK: ++ if (task_is_realtime(current)) ++ break; + if (arg2 <= 0) + current->timer_slack_ns = + current->default_timer_slack_ns; +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2090,14 +2090,9 @@ long hrtimer_nanosleep(ktime_t rqtp, con + struct restart_block *restart; + struct hrtimer_sleeper t; + int ret = 0; +- u64 slack; +- +- slack = current->timer_slack_ns; +- if (dl_task(current) || rt_task(current)) +- slack = 0; + + hrtimer_init_sleeper_on_stack(&t, clockid, mode); +- hrtimer_set_expires_range_ns(&t.timer, rqtp, slack); ++ hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns); + ret = do_nanosleep(&t, mode); + if (ret != -ERESTART_RESTARTBLOCK) + goto out; +@@ -2278,7 +2273,7 @@ void __init hrtimers_init(void) + /** + * schedule_hrtimeout_range_clock - sleep until timeout + * @expires: timeout value (ktime_t) +- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks ++ * @delta: slack in expires timeout (ktime_t) + * @mode: timer mode + * @clock_id: timer clock to be used + */ +@@ -2305,13 +2300,6 @@ schedule_hrtimeout_range_clock(ktime_t * + return -EINTR; + } + +- /* +- * Override any slack passed by the user if under +- * rt contraints. +- */ +- if (rt_task(current)) +- delta = 0; +- + hrtimer_init_sleeper_on_stack(&t, clock_id, mode); + hrtimer_set_expires_range_ns(&t.timer, *expires, delta); + hrtimer_sleeper_start_expires(&t, mode); +@@ -2331,7 +2319,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_ran + /** + * schedule_hrtimeout_range - sleep until timeout + * @expires: timeout value (ktime_t) +- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks ++ * @delta: slack in expires timeout (ktime_t) + * @mode: timer mode + * + * Make the current task sleep until the given expiry time has diff --git a/queue-6.1/series b/queue-6.1/series index a47ea14d11..b4ad35732f 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -1,2 +1,4 @@ clockevents-drivers-i8253-fix-stop-sequence-for-timer-0.patch sched-isolation-prevent-boot-crash-when-the-boot-cpu-is-nohz_full.patch +hrtimer-use-and-report-correct-timerslack-values-for-realtime-tasks.patch +fs-ntfs3-fix-shift-out-of-bounds-in-ntfs_fill_super.patch
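
For context only, and not part of either queued patch: a minimal userspace sketch of the behaviour the hrtimer change enforces, assuming a kernel with the fix applied and a process permitted to switch to SCHED_FIFO (CAP_SYS_NICE). The program name, priority value, and 100 us slack are arbitrary choices for illustration. On unpatched kernels the second read may still report the stale non-zero slack value even though hrtimers quietly override it to 0 for realtime tasks; with the fix, the task attribute itself is forced to 0 and PR_SET_TIMERSLACK is ignored for rt-policy tasks.

    /* slack.c - illustrative only. Build: gcc -o slack slack.c ; run as root. */
    #include <sched.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 10 };

            /* Default (SCHED_OTHER) policy: a non-zero slack is accepted and read back. */
            prctl(PR_SET_TIMERSLACK, 100000, 0, 0, 0);      /* 100 us */
            printf("SCHED_OTHER slack: %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));

            /* Switch the calling thread to a realtime policy; needs CAP_SYS_NICE. */
            if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
                    perror("sched_setscheduler");
                    return 1;
            }

            /* With the fix applied, timer_slack_ns is set to 0 for rt tasks and
             * PR_SET_TIMERSLACK is ignored, so this should print 0.
             */
            prctl(PR_SET_TIMERSLACK, 100000, 0, 0, 0);
            printf("SCHED_FIFO  slack: %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
            return 0;
    }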