git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 26 Sep 2022 06:54:27 +0000 (08:54 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 26 Sep 2022 06:54:27 +0000 (08:54 +0200)
added patches:
ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch
ext4-make-directory-inode-spreading-reflect-flexbg-size.patch
ext4-use-locality-group-preallocation-for-small-closed-files.patch

queue-5.15/ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch [new file with mode: 0644]
queue-5.15/ext4-make-directory-inode-spreading-reflect-flexbg-size.patch [new file with mode: 0644]
queue-5.15/ext4-use-locality-group-preallocation-for-small-closed-files.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch b/queue-5.15/ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch
new file mode 100644
index 0000000..da678b9
--- /dev/null
+++ b/queue-5.15/ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch
@@ -0,0 +1,72 @@
+From 1940265ede6683f6317cba0d428ce6505eaca944 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 8 Sep 2022 11:21:25 +0200
+Subject: ext4: avoid unnecessary spreading of allocations among groups
+
+From: Jan Kara <jack@suse.cz>
+
+commit 1940265ede6683f6317cba0d428ce6505eaca944 upstream.
+
+mb_set_largest_free_order() updates the lists containing groups with the
+largest chunk of free space of a given order. The way it updates them leads
+to always moving the group to the tail of the list. Thus allocations
+looking for free space of a given order effectively end up cycling through
+all groups (and, due to the initialization, in last-to-first order). This
+spreads allocations among block groups, which reduces performance for
+rotating disks or low-end flash media. Change
+mb_set_largest_free_order() to update the lists only if the order of the
+largest free chunk in the group has changed.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@kernel.org
+Reported-and-tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/all/0d81a7c2-46b7-6010-62a4-3e6cfc1628d6@i2se.com/
+Link: https://lore.kernel.org/r/20220908092136.11770-2-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1080,23 +1080,25 @@ mb_set_largest_free_order(struct super_b
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int i;
+ 
+-      if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
++      for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
++              if (grp->bb_counters[i] > 0)
++                      break;
++      /* No need to move between order lists? */
++      if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
++          i == grp->bb_largest_free_order) {
++              grp->bb_largest_free_order = i;
++              return;
++      }
++
++      if (grp->bb_largest_free_order >= 0) {
+               write_lock(&sbi->s_mb_largest_free_orders_locks[
+                                             grp->bb_largest_free_order]);
+               list_del_init(&grp->bb_largest_free_order_node);
+               write_unlock(&sbi->s_mb_largest_free_orders_locks[
+                                             grp->bb_largest_free_order]);
+       }
+-      grp->bb_largest_free_order = -1; /* uninit */
+-
+-      for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
+-              if (grp->bb_counters[i] > 0) {
+-                      grp->bb_largest_free_order = i;
+-                      break;
+-              }
+-      }
+-      if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
+-          grp->bb_largest_free_order >= 0 && grp->bb_free) {
++      grp->bb_largest_free_order = i;
++      if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+               write_lock(&sbi->s_mb_largest_free_orders_locks[
+                                             grp->bb_largest_free_order]);
+               list_add_tail(&grp->bb_largest_free_order_node,
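The core of the change is that mb_set_largest_free_order() now recomputes the
largest free order first and returns early when it has not changed, so the
group is no longer relinked (and rotated to the tail of its per-order list) on
every update. A minimal standalone sketch of that idea in ordinary userspace
C; set_largest_free_order() and the move counter here are illustrative
stand-ins, not the kernel code:

#include <stdio.h>

/* Per-group state: which per-order list the group currently sits on. */
struct group {
	int largest_order;	/* -1: not on any list yet */
};

static int list_moves;	/* how often the group is relinked (the costly part) */

/*
 * Mirrors the patched behaviour: recompute the order, return early when it
 * is unchanged so the group keeps its position in its list. In the kernel
 * the early return skips list_del_init()/list_add_tail() under the
 * per-order locks.
 */
static void set_largest_free_order(struct group *g, int new_order)
{
	if (new_order == g->largest_order)
		return;
	g->largest_order = new_order;
	list_moves++;
}

int main(void)
{
	struct group g = { .largest_order = -1 };

	set_largest_free_order(&g, 3);		/* first insert: one move */
	for (int i = 0; i < 1000; i++)
		set_largest_free_order(&g, 3);	/* unchanged: no list churn */
	set_largest_free_order(&g, 2);		/* order really dropped: one move */

	printf("list moves: %d\n", list_moves);	/* prints 2, not 1002 */
	return 0;
}

Only the two calls that actually change the order cost a list move; the
thousand no-op updates leave the list ordering, and therefore the scan order
of cr 0 / cr 1 allocation, untouched.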
diff --git a/queue-5.15/ext4-make-directory-inode-spreading-reflect-flexbg-size.patch b/queue-5.15/ext4-make-directory-inode-spreading-reflect-flexbg-size.patch
new file mode 100644
index 0000000..a82e75c
--- /dev/null
+++ b/queue-5.15/ext4-make-directory-inode-spreading-reflect-flexbg-size.patch
@@ -0,0 +1,39 @@
+From 613c5a85898d1cd44e68f28d65eccf64a8ace9cf Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 8 Sep 2022 11:21:26 +0200
+Subject: ext4: make directory inode spreading reflect flexbg size
+
+From: Jan Kara <jack@suse.cz>
+
+commit 613c5a85898d1cd44e68f28d65eccf64a8ace9cf upstream.
+
+Currently the Orlov inode allocator searches for free inodes for a
+directory only in flex block groups with at most inodes_per_group/16
+more directory inodes than the average per flex block group. However,
+with growing flex block group size this becomes unnecessarily strict.
+Scale the allowed difference from the average directory count per flex
+block group with the flex block group size, as we do with other metrics.
+
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/all/0d81a7c2-46b7-6010-62a4-3e6cfc1628d6@i2se.com/
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20220908092136.11770-3-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ialloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -510,7 +510,7 @@ static int find_group_orlov(struct super
+               goto fallback;
+       }
+ 
+-      max_dirs = ndirs / ngroups + inodes_per_group / 16;
++      max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
+       min_inodes = avefreei - inodes_per_group*flex_size / 4;
+       if (min_inodes < 1)
+               min_inodes = 1;
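The one-line change scales the slack term of max_dirs by flex_size, in line
with the min_inodes computation visible directly below it in the hunk. A
standalone sketch with made-up numbers (8192 inodes per group is an assumption
for illustration, not a value from the report) shows how much more headroom
larger flex block groups get:

#include <stdio.h>

int main(void)
{
	/* Hypothetical value, for illustration only. */
	unsigned long inodes_per_group = 8192;

	for (unsigned long flex_size = 1; flex_size <= 64; flex_size *= 4) {
		/* Old slack: fixed, regardless of flex block group size. */
		unsigned long old_slack = inodes_per_group / 16;
		/* New slack: grows with the number of groups per flex group. */
		unsigned long new_slack = inodes_per_group * flex_size / 16;

		printf("flex_size %2lu: old slack %5lu, new slack %6lu directories\n",
		       flex_size, old_slack, new_slack);
	}
	return 0;
}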
diff --git a/queue-5.15/ext4-use-locality-group-preallocation-for-small-closed-files.patch b/queue-5.15/ext4-use-locality-group-preallocation-for-small-closed-files.patch
new file mode 100644
index 0000000..fd33ac9
--- /dev/null
+++ b/queue-5.15/ext4-use-locality-group-preallocation-for-small-closed-files.patch
@@ -0,0 +1,82 @@
+From a9f2a2931d0e197ab28c6007966053fdababd53f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 8 Sep 2022 11:21:27 +0200
+Subject: ext4: use locality group preallocation for small closed files
+
+From: Jan Kara <jack@suse.cz>
+
+commit a9f2a2931d0e197ab28c6007966053fdababd53f upstream.
+
+Currently we don't use any preallocation when a file is already closed
+when allocating blocks (from the writeback code when converting delayed
+allocation). However, for small files, using locality group preallocation
+is actually desirable, as it is not specific to a particular file. Rather,
+it is a method to pack small files together to reduce fragmentation, and
+for that the fact that the file is closed is actually an even stronger
+hint that the file would benefit from packing. So change the logic to
+allow locality group preallocation in this case.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@kernel.org
+Reported-and-tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/all/0d81a7c2-46b7-6010-62a4-3e6cfc1628d6@i2se.com/
+Link: https://lore.kernel.org/r/20220908092136.11770-4-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -5169,6 +5169,7 @@ static void ext4_mb_group_or_file(struct
+       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+       int bsbits = ac->ac_sb->s_blocksize_bits;
+       loff_t size, isize;
++      bool inode_pa_eligible, group_pa_eligible;
+ 
+       if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
+               return;
+@@ -5176,25 +5177,27 @@ static void ext4_mb_group_or_file(struct
+       if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
+               return;
+ 
++      group_pa_eligible = sbi->s_mb_group_prealloc > 0;
++      inode_pa_eligible = true;
+       size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+       isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
+               >> bsbits;
+ 
++      /* No point in using inode preallocation for closed files */
+       if ((size == isize) && !ext4_fs_is_busy(sbi) &&
+-          !inode_is_open_for_write(ac->ac_inode)) {
+-              ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
+-              return;
+-      }
+-
+-      if (sbi->s_mb_group_prealloc <= 0) {
+-              ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+-              return;
+-      }
++          !inode_is_open_for_write(ac->ac_inode))
++              inode_pa_eligible = false;
+ 
+-      /* don't use group allocation for large files */
+       size = max(size, isize);
+-      if (size > sbi->s_mb_stream_request) {
+-              ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
++      /* Don't use group allocation for large files */
++      if (size > sbi->s_mb_stream_request)
++              group_pa_eligible = false;
++
++      if (!group_pa_eligible) {
++              if (inode_pa_eligible)
++                      ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
++              else
++                      ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
+               return;
+       }
+ 
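With the two eligibility flags, the decision in ext4_mb_group_or_file()
reduces to: locality group preallocation wins for small files even when they
are already closed, stream/inode preallocation is used otherwise while the
file is still open, and EXT4_MB_HINT_NOPREALLOC is only set when neither
applies. A reduced standalone model of that decision table in plain C,
ignoring details the real function also checks (EXT4_MB_HINT_GOAL_ONLY,
ext4_fs_is_busy(), the exact size/isize comparison):

#include <stdbool.h>
#include <stdio.h>

enum prealloc_choice { GROUP_PA, INODE_PA, NO_PA };

/*
 * Reduced model of the patched decision: group preallocation stays usable
 * for small files whether or not they are still open; "no preallocation"
 * is only picked when neither kind applies.
 */
static enum prealloc_choice pick_prealloc(bool small_file, bool file_closed,
					  bool group_pa_enabled)
{
	bool group_pa_eligible = group_pa_enabled && small_file;
	bool inode_pa_eligible = !file_closed;

	if (group_pa_eligible)
		return GROUP_PA;
	return inode_pa_eligible ? INODE_PA : NO_PA;
}

int main(void)
{
	static const char * const names[] = { "group PA", "inode PA", "no PA" };

	/* Small, already-closed file: NOPREALLOC before the patch,
	 * locality group preallocation after it. */
	printf("small + closed: %s\n", names[pick_prealloc(true, true, true)]);
	printf("large + open  : %s\n", names[pick_prealloc(false, false, true)]);
	printf("large + closed: %s\n", names[pick_prealloc(false, true, true)]);
	return 0;
}

The first call is the case this patch changes: a small, already-closed file
used to get EXT4_MB_HINT_NOPREALLOC and is now packed through the locality
group preallocation instead.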
diff --git a/queue-5.15/series b/queue-5.15/series
index 7a6475b9e427bba10db07175cd9b8e553521e6a4..7961664c4845a190d56b54cf14e4f8539d61a714 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -143,3 +143,6 @@ devdax-fix-soft-reservation-memory-description.patch
 ext4-fix-bug-in-extents-parsing-when-eh_entries-0-and-eh_depth-0.patch
 ext4-limit-the-number-of-retries-after-discarding-preallocations-blocks.patch
 ext4-make-mballoc-try-target-group-first-even-with-mb_optimize_scan.patch
+ext4-avoid-unnecessary-spreading-of-allocations-among-groups.patch
+ext4-make-directory-inode-spreading-reflect-flexbg-size.patch
+ext4-use-locality-group-preallocation-for-small-closed-files.patch