git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Jul 2024 10:21:34 +0000 (12:21 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Jul 2024 10:21:34 +0000 (12:21 +0200)
added patches:
apparmor-use-kvfree_sensitive-to-free-data-data.patch
ext4-check-dot-and-dotdot-of-dx_root-before-making-dir-indexed.patch
ext4-make-sure-the-first-directory-block-is-not-a-hole.patch
m68k-amiga-turn-off-warp1260-interrupts-during-boot.patch
sched-fair-use-all-little-cpus-for-cpu-bound-workloads.patch
task_work-introduce-task_work_cancel-again.patch
task_work-s-task_work_cancel-task_work_cancel_func.patch
udf-avoid-using-corrupted-block-bitmap-buffer.patch

queue-5.10/apparmor-use-kvfree_sensitive-to-free-data-data.patch [new file with mode: 0644]
queue-5.10/ext4-check-dot-and-dotdot-of-dx_root-before-making-dir-indexed.patch [new file with mode: 0644]
queue-5.10/ext4-make-sure-the-first-directory-block-is-not-a-hole.patch [new file with mode: 0644]
queue-5.10/m68k-amiga-turn-off-warp1260-interrupts-during-boot.patch [new file with mode: 0644]
queue-5.10/sched-fair-use-all-little-cpus-for-cpu-bound-workloads.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/task_work-introduce-task_work_cancel-again.patch [new file with mode: 0644]
queue-5.10/task_work-s-task_work_cancel-task_work_cancel_func.patch [new file with mode: 0644]
queue-5.10/udf-avoid-using-corrupted-block-bitmap-buffer.patch [new file with mode: 0644]

diff --git a/queue-5.10/apparmor-use-kvfree_sensitive-to-free-data-data.patch b/queue-5.10/apparmor-use-kvfree_sensitive-to-free-data-data.patch
new file mode 100644 (file)
index 0000000..72dea55
--- /dev/null
@@ -0,0 +1,48 @@
+From 2bc73505a5cd2a18a7a542022722f136c19e3b87 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Thu, 1 Feb 2024 17:24:48 +0300
+Subject: apparmor: use kvfree_sensitive to free data->data
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 2bc73505a5cd2a18a7a542022722f136c19e3b87 upstream.
+
+Inside unpack_profile() data->data is allocated using kvmemdup() so it
+should be freed with the corresponding kvfree_sensitive().
+
+Also add missing data->data release for rhashtable insertion failure path
+in unpack_profile().
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: e025be0f26d5 ("apparmor: support querying extended trusted helper extra data")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: John Johansen <john.johansen@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/apparmor/policy.c        |    2 +-
+ security/apparmor/policy_unpack.c |    1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -187,7 +187,7 @@ static void aa_free_data(void *ptr, void
+ {
+       struct aa_data *data = ptr;
+-      kfree_sensitive(data->data);
++      kvfree_sensitive(data->data, data->size);
+       kfree_sensitive(data->key);
+       kfree_sensitive(data);
+ }
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -915,6 +915,7 @@ static struct aa_profile *unpack_profile
+                       if (rhashtable_insert_fast(profile->data, &data->head,
+                                                  profile->data->p)) {
++                              kvfree_sensitive(data->data, data->size);
+                               kfree_sensitive(data->key);
+                               kfree_sensitive(data);
+                               info = "failed to insert data to table";
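
A minimal sketch (not part of the patch) of the allocation/free pairing the
fix restores: a buffer obtained with kvmemdup() may be vmalloc-backed, so it
must be released with kvfree_sensitive(), which also zeroes it before
freeing; kfree_sensitive() only handles slab memory. The demo_* names are
illustrative.

  #include <linux/mm.h>
  #include <linux/slab.h>

  struct demo_blob {                      /* stands in for struct aa_data */
          void *data;
          size_t size;
  };

  static int demo_blob_set(struct demo_blob *blob, const void *src, size_t len)
  {
          blob->data = kvmemdup(src, len, GFP_KERNEL); /* may use vmalloc */
          if (!blob->data)
                  return -ENOMEM;
          blob->size = len;
          return 0;
  }

  static void demo_blob_free(struct demo_blob *blob)
  {
          /* kfree_sensitive(blob->data) would be wrong for vmalloc memory */
          kvfree_sensitive(blob->data, blob->size);
  }
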
diff --git a/queue-5.10/ext4-check-dot-and-dotdot-of-dx_root-before-making-dir-indexed.patch b/queue-5.10/ext4-check-dot-and-dotdot-of-dx_root-before-making-dir-indexed.patch
new file mode 100644 (file)
index 0000000..0a0c897
--- /dev/null
@@ -0,0 +1,151 @@
+From 50ea741def587a64e08879ce6c6a30131f7111e7 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Tue, 2 Jul 2024 21:23:48 +0800
+Subject: ext4: check dot and dotdot of dx_root before making dir indexed
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 50ea741def587a64e08879ce6c6a30131f7111e7 upstream.
+
+Syzbot reports an issue as follows:
+============================================
+BUG: unable to handle page fault for address: ffffed11022e24fe
+PGD 23ffee067 P4D 23ffee067 PUD 0
+Oops: Oops: 0000 [#1] PREEMPT SMP KASAN PTI
+CPU: 0 PID: 5079 Comm: syz-executor306 Not tainted 6.10.0-rc5-g55027e689933 #0
+Call Trace:
+ <TASK>
+ make_indexed_dir+0xdaf/0x13c0 fs/ext4/namei.c:2341
+ ext4_add_entry+0x222a/0x25d0 fs/ext4/namei.c:2451
+ ext4_rename fs/ext4/namei.c:3936 [inline]
+ ext4_rename2+0x26e5/0x4370 fs/ext4/namei.c:4214
+[...]
+============================================
+
+The immediate cause of this problem is that there is only one valid dentry
+in the block to be split during do_split(), so split == 0, which results in
+out-of-bounds accesses to the map and triggers the issue.
+
+    do_split
+      unsigned split
+      dx_make_map
+       count = 1
+      split = count/2 = 0;
+      continued = hash2 == map[split - 1].hash;
+       ---> map[4294967295]
+
+The maximum length of a filename is 255 and the minimum block size is 1024,
+so it is always guaranteed that the number of entries is greater than or
+equal to 2 when do_split() is called.
+
+But syzbot's crafted image has no dot and dotdot in dir, and the dentry
+distribution in dirblock is as follows:
+
+  bus     dentry1          hole           dentry2           free
+|xx--|xx-------------|...............|xx-------------|...............|
+0   12 (8+248)=256  268     256     524 (8+256)=264 788     236     1024
+
+So when renaming dentry1 increases its name_len by 1, neither hole
+nor free is sufficient to hold the new dentry, and make_indexed_dir() is
+called.
+
+In make_indexed_dir() it is assumed that the first two entries of the
+dirblock must be dot and dotdot, so bus and dentry1 are left in dx_root
+because they are treated as dot and dotdot, and only dentry2 is moved
+to the new leaf block. That's why count is equal to 1.
+
+Therefore, add the ext4_check_dx_root() helper to apply stricter sanity
+checks to dot and dotdot before starting the conversion, avoiding the above
+issue.
+
+Reported-by: syzbot+ae688d469e36fb5138d0@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=ae688d469e36fb5138d0
+Fixes: ac27a0ec112a ("[PATCH] ext4: initial copy of files from ext3")
+Cc: stable@kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20240702132349.2600605-2-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/namei.c |   56 +++++++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 51 insertions(+), 5 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2098,6 +2098,52 @@ static int add_dirent_to_buf(handle_t *h
+       return err ? err : err2;
+ }
++static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root)
++{
++      struct fake_dirent *fde;
++      const char *error_msg;
++      unsigned int rlen;
++      unsigned int blocksize = dir->i_sb->s_blocksize;
++      char *blockend = (char *)root + dir->i_sb->s_blocksize;
++
++      fde = &root->dot;
++      if (unlikely(fde->name_len != 1)) {
++              error_msg = "invalid name_len for '.'";
++              goto corrupted;
++      }
++      if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) {
++              error_msg = "invalid name for '.'";
++              goto corrupted;
++      }
++      rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++      if (unlikely((char *)fde + rlen >= blockend)) {
++              error_msg = "invalid rec_len for '.'";
++              goto corrupted;
++      }
++
++      fde = &root->dotdot;
++      if (unlikely(fde->name_len != 2)) {
++              error_msg = "invalid name_len for '..'";
++              goto corrupted;
++      }
++      if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) {
++              error_msg = "invalid name for '..'";
++              goto corrupted;
++      }
++      rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++      if (unlikely((char *)fde + rlen >= blockend)) {
++              error_msg = "invalid rec_len for '..'";
++              goto corrupted;
++      }
++
++      return true;
++
++corrupted:
++      EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended",
++                       error_msg);
++      return false;
++}
++
+ /*
+  * This converts a one block unindexed directory to a 3 block indexed
+  * directory, and adds the dentry to the indexed directory.
+@@ -2131,17 +2177,17 @@ static int make_indexed_dir(handle_t *ha
+               brelse(bh);
+               return retval;
+       }
++
+       root = (struct dx_root *) bh->b_data;
++      if (!ext4_check_dx_root(dir, root)) {
++              brelse(bh);
++              return -EFSCORRUPTED;
++      }
+       /* The 0th block becomes the root, move the dirents out */
+       fde = &root->dotdot;
+       de = (struct ext4_dir_entry_2 *)((char *)fde +
+               ext4_rec_len_from_disk(fde->rec_len, blocksize));
+-      if ((char *) de >= (((char *) root) + blocksize)) {
+-              EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+-              brelse(bh);
+-              return -EFSCORRUPTED;
+-      }
+       len = ((char *) root) + (blocksize - csum_size) - (char *) de;
+       /* Allocate new block for the 0th block's dirents */
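
A small standalone illustration (not from the patch) of the arithmetic
described above: with a single valid dentry in the block, count = 1, the
unsigned split = count / 2 is 0, and split - 1 wraps around, which is where
the out-of-bounds map[] index comes from.

  #include <stdio.h>

  int main(void)
  {
          unsigned int count = 1;         /* only one valid dentry in the block */
          unsigned int split = count / 2; /* 0 */

          /* map[split - 1] is the access do_split() performs; the index wraps */
          printf("split = %u, split - 1 = %u\n", split, split - 1);
          /* prints: split = 0, split - 1 = 4294967295 */
          return 0;
  }
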
diff --git a/queue-5.10/ext4-make-sure-the-first-directory-block-is-not-a-hole.patch b/queue-5.10/ext4-make-sure-the-first-directory-block-is-not-a-hole.patch
new file mode 100644 (file)
index 0000000..79e03e5
--- /dev/null
@@ -0,0 +1,85 @@
+From f9ca51596bbfd0f9c386dd1c613c394c78d9e5e6 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Tue, 2 Jul 2024 21:23:49 +0800
+Subject: ext4: make sure the first directory block is not a hole
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit f9ca51596bbfd0f9c386dd1c613c394c78d9e5e6 upstream.
+
+Syzbot constructs a directory that has no dirblock but is non-inline,
+i.e. the first directory block is a hole, and no error is reported when
+creating files in this directory in the following flow.
+
+    ext4_mknod
+     ...
+      ext4_add_entry
+        // Read block 0
+        ext4_read_dirblock(dir, block, DIRENT)
+          bh = ext4_bread(NULL, inode, block, 0)
+          if (!bh && (type == INDEX || type == DIRENT_HTREE))
+          // The first directory block is a hole
+          // But type == DIRENT, so no error is reported.
+
+After that, we get a directory block without '.' and '..' but with a valid
+dentry. This may cause some code that relies on dot or dotdot (such as
+make_indexed_dir()) to crash.
+
+Therefore, when ext4_read_dirblock() finds that the first directory block
+is a hole, report that the filesystem is corrupted and return an error to
+avoid loading corrupted data from disk and causing further damage.
+
+Reported-by: syzbot+ae688d469e36fb5138d0@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=ae688d469e36fb5138d0
+Fixes: 4e19d6b65fb4 ("ext4: allow directory holes")
+Cc: stable@kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20240702132349.2600605-3-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/namei.c |   17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -145,10 +145,11 @@ static struct buffer_head *__ext4_read_d
+               return bh;
+       }
+-      if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
++      /* The first directory block must not be a hole. */
++      if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) {
+               ext4_error_inode(inode, func, line, block,
+-                               "Directory hole found for htree %s block",
+-                               (type == INDEX) ? "index" : "leaf");
++                               "Directory hole found for htree %s block %u",
++                               (type == INDEX) ? "index" : "leaf", block);
+               return ERR_PTR(-EFSCORRUPTED);
+       }
+       if (!bh)
+@@ -2977,10 +2978,7 @@ bool ext4_empty_dir(struct inode *inode)
+               EXT4_ERROR_INODE(inode, "invalid size");
+               return false;
+       }
+-      /* The first directory block must not be a hole,
+-       * so treat it as DIRENT_HTREE
+-       */
+-      bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++      bh = ext4_read_dirblock(inode, 0, EITHER);
+       if (IS_ERR(bh))
+               return false;
+@@ -3611,10 +3609,7 @@ static struct buffer_head *ext4_get_firs
+               struct ext4_dir_entry_2 *de;
+               unsigned int offset;
+-              /* The first directory block must not be a hole, so
+-               * treat it as DIRENT_HTREE
+-               */
+-              bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++              bh = ext4_read_dirblock(inode, 0, EITHER);
+               if (IS_ERR(bh)) {
+                       *retval = PTR_ERR(bh);
+                       return NULL;
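
A brief hedged sketch of the caller-side effect: since __ext4_read_dirblock()
itself now rejects a hole at block 0, call sites that previously passed
DIRENT_HTREE only to catch that case can pass EITHER and still receive
-EFSCORRUPTED for a corrupted directory. This mirrors the post-patch call
sites rather than adding new code.

  struct buffer_head *bh;

  bh = ext4_read_dirblock(inode, 0, EITHER);
  if (IS_ERR(bh))
          return false;           /* e.g. -EFSCORRUPTED: block 0 was a hole */
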
diff --git a/queue-5.10/m68k-amiga-turn-off-warp1260-interrupts-during-boot.patch b/queue-5.10/m68k-amiga-turn-off-warp1260-interrupts-during-boot.patch
new file mode 100644 (file)
index 0000000..9a934fe
--- /dev/null
@@ -0,0 +1,57 @@
+From 1d8491d3e726984343dd8c3cdbe2f2b47cfdd928 Mon Sep 17 00:00:00 2001
+From: Paolo Pisati <p.pisati@gmail.com>
+Date: Sat, 1 Jun 2024 17:32:54 +0200
+Subject: m68k: amiga: Turn off Warp1260 interrupts during boot
+
+From: Paolo Pisati <p.pisati@gmail.com>
+
+commit 1d8491d3e726984343dd8c3cdbe2f2b47cfdd928 upstream.
+
+On an Amiga 1200 equipped with a Warp1260 accelerator, an interrupt
+storm coming from the accelerator board causes the machine to crash in
+local_irq_enable() or auto_irq_enable().  Disabling interrupts for the
+Warp1260 in amiga_parse_bootinfo() fixes the problem.
+
+Link: https://lore.kernel.org/r/ZkjwzVwYeQtyAPrL@amaterasu.local
+Cc: stable <stable@kernel.org>
+Signed-off-by: Paolo Pisati <p.pisati@gmail.com>
+Reviewed-by: Michael Schmitz <schmitzmic@gmail.com>
+Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Link: https://lore.kernel.org/r/20240601153254.186225-1-p.pisati@gmail.com
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/m68k/amiga/config.c       |    9 +++++++++
+ include/uapi/linux/zorro_ids.h |    3 +++
+ 2 files changed, 12 insertions(+)
+
+--- a/arch/m68k/amiga/config.c
++++ b/arch/m68k/amiga/config.c
+@@ -179,6 +179,15 @@ int __init amiga_parse_bootinfo(const st
+                       dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
+                       dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
+                       dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
++
++                      /* CS-LAB Warp 1260 workaround */
++                      if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
++                          dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
++
++                              /* turn off all interrupts */
++                              pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
++                              *(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
++                      }
+               } else
+                       pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
+ #endif /* CONFIG_ZORRO */
+--- a/include/uapi/linux/zorro_ids.h
++++ b/include/uapi/linux/zorro_ids.h
+@@ -449,6 +449,9 @@
+ #define  ZORRO_PROD_VMC_ISDN_BLASTER_Z2                               ZORRO_ID(VMC, 0x01, 0)
+ #define  ZORRO_PROD_VMC_HYPERCOM_4                            ZORRO_ID(VMC, 0x02, 0)
++#define ZORRO_MANUF_CSLAB                                     0x1400
++#define  ZORRO_PROD_CSLAB_WARP_1260                           ZORRO_ID(CSLAB, 0x65, 0)
++
+ #define ZORRO_MANUF_INFORMATION                                       0x157C
+ #define  ZORRO_PROD_INFORMATION_ISDN_ENGINE_I                 ZORRO_ID(INFORMATION, 0x64, 0)
diff --git a/queue-5.10/sched-fair-use-all-little-cpus-for-cpu-bound-workloads.patch b/queue-5.10/sched-fair-use-all-little-cpus-for-cpu-bound-workloads.patch
new file mode 100644 (file)
index 0000000..01467ea
--- /dev/null
@@ -0,0 +1,113 @@
+From 3af7524b14198f5159a86692d57a9f28ec9375ce Mon Sep 17 00:00:00 2001
+From: Pierre Gondois <pierre.gondois@arm.com>
+Date: Wed, 6 Dec 2023 10:00:43 +0100
+Subject: sched/fair: Use all little CPUs for CPU-bound workloads
+
+From: Pierre Gondois <pierre.gondois@arm.com>
+
+commit 3af7524b14198f5159a86692d57a9f28ec9375ce upstream.
+
+Running N CPU-bound tasks on an N CPUs platform:
+
+- with asymmetric CPU capacity
+
+- not being a DynamIq system (i.e. having a PKG level sched domain
+  without the SD_SHARE_PKG_RESOURCES flag set)
+
+... might result in a task placement where two tasks run on a big CPU
+and none on a little CPU. A better placement would use all the CPUs.
+
+Testing platform:
+
+  Juno-r2:
+    - 2 big CPUs (1-2), maximum capacity of 1024
+    - 4 little CPUs (0,3-5), maximum capacity of 383
+
+Testing workload ([1]):
+
+  Spawn 6 CPU-bound tasks. During the first 100ms (step 1), each task
+  is affined to a CPU, except for:
+
+    - one little CPU which is left idle.
+    - one big CPU which has 2 tasks affined to it.
+
+  After the 100ms (step 2), remove the cpumask affinity.
+
+Behavior before the patch:
+
+  During step 2, the load balancer running from the idle CPU tags sched
+  domains as:
+
+  - little CPUs: 'group_has_spare'. Cf. group_has_capacity() and
+    group_is_overloaded(), 3 CPU-bound tasks run on a 4 CPUs
+    sched-domain, and the idle CPU provides enough spare capacity
+    regarding the imbalance_pct
+
+  - big CPUs: 'group_overloaded'. Indeed, 3 tasks run on a 2 CPUs
+    sched-domain, so the following path is used:
+
+      group_is_overloaded()
+      \-if (sgs->sum_nr_running <= sgs->group_weight) return true;
+
+    The following path which would change the migration type to
+    'migrate_task' is not taken:
+
+      calculate_imbalance()
+      \-if (env->idle != CPU_NOT_IDLE && env->imbalance == 0)
+
+    as the local group has some spare capacity, so the imbalance
+    is not 0.
+
+  The migration type requested is 'migrate_util' and the busiest
+  runqueue is the big CPU's runqueue having 2 tasks (each having a
+  utilization of 512). The idle little CPU cannot pull one of these
+  tasks as its capacity is too small for the task. The following path
+  is used:
+
+   detach_tasks()
+   \-case migrate_util:
+     \-if (util > env->imbalance) goto next;
+
+After the patch:
+
+As the number of failed balancing attempts grows (with
+'nr_balance_failed'), progressively make it easier to migrate
+a big task to the idling little CPU. A similar mechanism is
+used for the 'migrate_load' migration type.
+
+Improvement:
+
+Running the testing workload [1] with the step 2 representing
+a ~10s load for a big CPU:
+
+  Before patch: ~19.3s
+  After patch:  ~18s (-6.7%)
+
+Similar issue reported at:
+
+  https://lore.kernel.org/lkml/20230716014125.139577-1-qyousef@layalina.io/
+
+Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Acked-by: Qais Yousef <qyousef@layalina.io>
+Link: https://lore.kernel.org/r/20231206090043.634697-1-pierre.gondois@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7950,7 +7950,7 @@ static int detach_tasks(struct lb_env *e
+               case migrate_util:
+                       util = task_util_est(p);
+-                      if (util > env->imbalance)
++                      if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance)
+                               goto next;
+                       env->imbalance -= util;
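
A hedged, userspace-only illustration (not from the patch) of the one-line
change: shr_bound() is assumed here to behave like a right shift whose count
is clamped below the width of the type, so each failed balance attempt halves
the utilization compared against env->imbalance and a big task eventually
becomes eligible for the idle little CPU. The numbers are illustrative,
loosely based on the commit message.

  #include <stdio.h>

  /* assumed semantics of the kernel's shr_bound(); not the kernel macro */
  static unsigned long shr_bound(unsigned long val, unsigned int shift)
  {
          unsigned int max_shift = 8 * sizeof(val) - 1;

          return val >> (shift < max_shift ? shift : max_shift);
  }

  int main(void)
  {
          unsigned long util = 512;      /* one CPU-bound big task */
          unsigned long imbalance = 383; /* what the idle little CPU can absorb */
          unsigned int failed;

          for (failed = 0; failed <= 3; failed++)
                  printf("nr_balance_failed=%u: %4lu > %lu ? %s\n",
                         failed, shr_bound(util, failed), imbalance,
                         shr_bound(util, failed) > imbalance ?
                                 "skip task" : "migrate");
          return 0;
  }
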
diff --git a/queue-5.10/series b/queue-5.10/series
index 00ffaeebdc49af9afab900f3ad644aa7224ad51f..a1e912264066c895effabe4a3adcc6e84e5f5203 100644 (file)
@@ -136,3 +136,11 @@ drm-gma500-fix-null-pointer-dereference-in-cdv_intel_lvds_get_modes.patch
 drm-gma500-fix-null-pointer-dereference-in-psb_intel_lvds_get_modes.patch
 scsi-qla2xxx-fix-optrom-version-displayed-in-fdmi.patch
 drm-amd-display-check-for-null-pointer.patch
+sched-fair-use-all-little-cpus-for-cpu-bound-workloads.patch
+apparmor-use-kvfree_sensitive-to-free-data-data.patch
+task_work-s-task_work_cancel-task_work_cancel_func.patch
+task_work-introduce-task_work_cancel-again.patch
+udf-avoid-using-corrupted-block-bitmap-buffer.patch
+m68k-amiga-turn-off-warp1260-interrupts-during-boot.patch
+ext4-check-dot-and-dotdot-of-dx_root-before-making-dir-indexed.patch
+ext4-make-sure-the-first-directory-block-is-not-a-hole.patch
diff --git a/queue-5.10/task_work-introduce-task_work_cancel-again.patch b/queue-5.10/task_work-introduce-task_work_cancel-again.patch
new file mode 100644 (file)
index 0000000..0050034
--- /dev/null
@@ -0,0 +1,66 @@
+From f409530e4db9dd11b88cb7703c97c8f326ff6566 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Fri, 21 Jun 2024 11:15:59 +0200
+Subject: task_work: Introduce task_work_cancel() again
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit f409530e4db9dd11b88cb7703c97c8f326ff6566 upstream.
+
+Re-introduce task_work_cancel(), this time to cancel an actual callback
+and not *any* callback pointing to a given function. This is going to be
+needed for perf events event freeing.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240621091601.18227-3-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/task_work.h |    1 +
+ kernel/task_work.c        |   24 ++++++++++++++++++++++++
+ 2 files changed, 25 insertions(+)
+
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -25,6 +25,7 @@ int task_work_add(struct task_struct *ta
+ struct callback_head *task_work_cancel_match(struct task_struct *task,
+       bool (*match)(struct callback_head *, void *data), void *data);
+ struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
+ void task_work_run(void);
+ static inline void exit_task_work(struct task_struct *task)
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -117,6 +117,30 @@ task_work_cancel_func(struct task_struct
+       return task_work_cancel_match(task, task_work_func_match, func);
+ }
++static bool task_work_match(struct callback_head *cb, void *data)
++{
++      return cb == data;
++}
++
++/**
++ * task_work_cancel - cancel a pending work added by task_work_add()
++ * @task: the task which should execute the work
++ * @cb: the callback to remove if queued
++ *
++ * Remove a callback from a task's queue if queued.
++ *
++ * RETURNS:
++ * True if the callback was queued and got cancelled, false otherwise.
++ */
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
++{
++      struct callback_head *ret;
++
++      ret = task_work_cancel_match(task, task_work_match, cb);
++
++      return ret == cb;
++}
++
+ /**
+  * task_work_run - execute the works added by task_work_add()
+  *
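
A hedged usage sketch (not from the patch): a caller that keeps the
callback_head it queued can later cancel exactly that instance with the
re-introduced task_work_cancel(), regardless of other pending works that
happen to use the same function. The demo_* names are illustrative.

  #include <linux/sched.h>
  #include <linux/task_work.h>

  static struct callback_head demo_work;

  static void demo_work_fn(struct callback_head *head)
  {
          /* runs from task_work_run() in the target task's context */
  }

  static int demo_queue(struct task_struct *task)
  {
          init_task_work(&demo_work, demo_work_fn);
          return task_work_add(task, &demo_work, TWA_RESUME);
  }

  static bool demo_cancel(struct task_struct *task)
  {
          /* cancels this specific callback_head, not any work using demo_work_fn */
          return task_work_cancel(task, &demo_work);
  }
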
diff --git a/queue-5.10/task_work-s-task_work_cancel-task_work_cancel_func.patch b/queue-5.10/task_work-s-task_work_cancel-task_work_cancel_func.patch
new file mode 100644 (file)
index 0000000..7db781d
--- /dev/null
@@ -0,0 +1,92 @@
+From 68cbd415dd4b9c5b9df69f0f091879e56bf5907a Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Fri, 21 Jun 2024 11:15:58 +0200
+Subject: task_work: s/task_work_cancel()/task_work_cancel_func()/
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 68cbd415dd4b9c5b9df69f0f091879e56bf5907a upstream.
+
+A proper task_work_cancel() API that actually cancels a callback and not
+*any* callback pointing to a given function is going to be needed for
+perf events event freeing. Do the appropriate rename to prepare for
+that.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240621091601.18227-2-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/task_work.h |    2 +-
+ kernel/irq/manage.c       |    2 +-
+ kernel/task_work.c        |   10 +++++-----
+ security/keys/keyctl.c    |    2 +-
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -24,7 +24,7 @@ int task_work_add(struct task_struct *ta
+ struct callback_head *task_work_cancel_match(struct task_struct *task,
+       bool (*match)(struct callback_head *, void *data), void *data);
+-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
++struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
+ void task_work_run(void);
+ static inline void exit_task_work(struct task_struct *task)
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1230,7 +1230,7 @@ static int irq_thread(void *data)
+        * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+        * oneshot mask bit can be set.
+        */
+-      task_work_cancel(current, irq_thread_dtor);
++      task_work_cancel_func(current, irq_thread_dtor);
+       return 0;
+ }
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -101,9 +101,9 @@ static bool task_work_func_match(struct
+ }
+ /**
+- * task_work_cancel - cancel a pending work added by task_work_add()
+- * @task: the task which should execute the work
+- * @func: identifies the work to remove
++ * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
++ * @task: the task which should execute the func's work
++ * @func: identifies the func to match with a work to remove
+  *
+  * Find the last queued pending work with ->func == @func and remove
+  * it from queue.
+@@ -112,7 +112,7 @@ static bool task_work_func_match(struct
+  * The found work or NULL if not found.
+  */
+ struct callback_head *
+-task_work_cancel(struct task_struct *task, task_work_func_t func)
++task_work_cancel_func(struct task_struct *task, task_work_func_t func)
+ {
+       return task_work_cancel_match(task, task_work_func_match, func);
+ }
+@@ -149,7 +149,7 @@ void task_work_run(void)
+               if (!work)
+                       break;
+               /*
+-               * Synchronize with task_work_cancel(). It can not remove
++               * Synchronize with task_work_cancel_match(). It can not remove
+                * the first entry == work, cmpxchg(task_works) must fail.
+                * But it can remove another entry from the ->next list.
+                */
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1694,7 +1694,7 @@ long keyctl_session_to_parent(void)
+               goto unlock;
+       /* cancel an already pending keyring replacement */
+-      oldwork = task_work_cancel(parent, key_change_session_keyring);
++      oldwork = task_work_cancel_func(parent, key_change_session_keyring);
+       /* the replacement session keyring is applied just prior to userspace
+        * restarting */
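
A brief hedged sketch (not from the patch) of the ambiguity the rename makes
explicit: task_work_cancel_func() removes the most recently queued work whose
->func matches, which is not necessarily the instance a given caller queued,
whereas task_work_cancel() (re-introduced by the previous patch) matches the
exact callback_head.

  #include <linux/sched.h>
  #include <linux/task_work.h>

  static struct callback_head work_a, work_b;     /* two users, same function */

  static void shared_fn(struct callback_head *head)
  {
  }

  static void demo_ambiguity(struct task_struct *task)
  {
          struct callback_head *cb;

          init_task_work(&work_a, shared_fn);
          init_task_work(&work_b, shared_fn);
          task_work_add(task, &work_a, TWA_RESUME);
          task_work_add(task, &work_b, TWA_RESUME);

          /* by function: returns the last queued match, here &work_b */
          cb = task_work_cancel_func(task, shared_fn);
          (void)cb;

          /* by callback_head: removes exactly &work_a, if still pending */
          task_work_cancel(task, &work_a);
  }
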
diff --git a/queue-5.10/udf-avoid-using-corrupted-block-bitmap-buffer.patch b/queue-5.10/udf-avoid-using-corrupted-block-bitmap-buffer.patch
new file mode 100644 (file)
index 0000000..d8c0815
--- /dev/null
@@ -0,0 +1,73 @@
+From a90d4471146de21745980cba51ce88e7926bcc4f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 17 Jun 2024 17:41:52 +0200
+Subject: udf: Avoid using corrupted block bitmap buffer
+
+From: Jan Kara <jack@suse.cz>
+
+commit a90d4471146de21745980cba51ce88e7926bcc4f upstream.
+
+When the filesystem block bitmap is corrupted, we detect the corruption
+while loading the bitmap and fail the allocation with an error. However,
+the next allocation from the same bitmap will notice the bitmap buffer is
+already loaded and try to allocate from the bitmap, with mixed results
+(depending on the exact nature of the bitmap corruption). Fix the
+problem by using the BH_verified bit to indicate whether the bitmap is
+valid or not.
+
+Reported-by: syzbot+5f682cd029581f9edfd1@syzkaller.appspotmail.com
+CC: stable@vger.kernel.org
+Link: https://patch.msgid.link/20240617154201.29512-2-jack@suse.cz
+Fixes: 1e0d4adf17e7 ("udf: Check consistency of Space Bitmap Descriptor")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/balloc.c |   15 +++++++++++++--
+ fs/udf/super.c  |    3 ++-
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -68,8 +68,12 @@ static int read_block_bitmap(struct supe
+       }
+       for (i = 0; i < count; i++)
+-              if (udf_test_bit(i + off, bh->b_data))
++              if (udf_test_bit(i + off, bh->b_data)) {
++                      bitmap->s_block_bitmap[bitmap_nr] =
++                                                      ERR_PTR(-EFSCORRUPTED);
++                      brelse(bh);
+                       return -EFSCORRUPTED;
++              }
+       return 0;
+ }
+@@ -85,8 +89,15 @@ static int __load_block_bitmap(struct su
+                         block_group, nr_groups);
+       }
+-      if (bitmap->s_block_bitmap[block_group])
++      if (bitmap->s_block_bitmap[block_group]) {
++              /*
++               * The bitmap failed verification in the past. No point in
++               * trying again.
++               */
++              if (IS_ERR(bitmap->s_block_bitmap[block_group]))
++                      return PTR_ERR(bitmap->s_block_bitmap[block_group]);
+               return block_group;
++      }
+       retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+       if (retval < 0)
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -266,7 +266,8 @@ static void udf_sb_free_bitmap(struct ud
+       int nr_groups = bitmap->s_nr_groups;
+       for (i = 0; i < nr_groups; i++)
+-              brelse(bitmap->s_block_bitmap[i]);
++              if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
++                      brelse(bitmap->s_block_bitmap[i]);
+       kvfree(bitmap);
+ }
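
A hedged sketch (not from the patch) of the caching pattern the fix relies
on: when verification of a freshly read bitmap fails, the cache slot keeps an
ERR_PTR() sentinel so later allocations fail fast instead of reusing a
corrupted buffer, and teardown skips brelse() for the sentinel. The
load_and_verify() helper is hypothetical.

  #include <linux/buffer_head.h>
  #include <linux/err.h>
  #include <linux/fs.h>

  /* hypothetical helper: reads the bitmap block and verifies its contents */
  struct buffer_head *load_and_verify(struct super_block *sb, unsigned int group);

  static struct buffer_head *get_bitmap_bh(struct buffer_head **slot,
                                           struct super_block *sb,
                                           unsigned int group)
  {
          struct buffer_head *bh;

          if (*slot)              /* verified bh, or an ERR_PTR() from before */
                  return *slot;

          bh = load_and_verify(sb, group);
          if (IS_ERR(bh)) {
                  *slot = ERR_PTR(-EFSCORRUPTED); /* remember the failure */
                  return *slot;
          }
          *slot = bh;
          return bh;
  }

  static void put_bitmap_bh(struct buffer_head **slot)
  {
          if (!IS_ERR_OR_NULL(*slot))     /* never brelse() the sentinel */
                  brelse(*slot);
          *slot = NULL;
  }
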