git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Jan 2023 14:24:07 +0000 (15:24 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Jan 2023 14:24:07 +0000 (15:24 +0100)
added patches:
revert-ext4-add-new-pending-reservation-mechanism.patch
revert-ext4-fix-delayed-allocation-bug-in-ext4_clu_mapped-for-bigalloc-inline.patch
revert-ext4-fix-reserved-cluster-accounting-at-delayed-write-time.patch
revert-ext4-generalize-extents-status-tree-search-functions.patch
x86-fpu-use-_alignof-to-avoid-undefined-behavior-in-type_align.patch

queue-4.19/revert-ext4-add-new-pending-reservation-mechanism.patch [new file with mode: 0644]
queue-4.19/revert-ext4-fix-delayed-allocation-bug-in-ext4_clu_mapped-for-bigalloc-inline.patch [new file with mode: 0644]
queue-4.19/revert-ext4-fix-reserved-cluster-accounting-at-delayed-write-time.patch [new file with mode: 0644]
queue-4.19/revert-ext4-generalize-extents-status-tree-search-functions.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-fpu-use-_alignof-to-avoid-undefined-behavior-in-type_align.patch [new file with mode: 0644]

diff --git a/queue-4.19/revert-ext4-add-new-pending-reservation-mechanism.patch b/queue-4.19/revert-ext4-add-new-pending-reservation-mechanism.patch
new file mode 100644
index 0000000..ba9deee
--- /dev/null
@@ -0,0 +1,350 @@
+From 16c9f584d13461c6c41863427dcb5ee2a465366b Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 22 Jan 2023 15:12:37 +0100
+Subject: Revert "ext4: add new pending reservation mechanism"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 9bacbb4cfdbc41518d13f620d3f53c0ba36ca87e which is
+commit 1dc0aa46e74a3366e12f426b7caaca477853e9c3 upstream.
+
+Eric writes:
+       I recommend not backporting this patch or the other three
+       patches apparently intended to support it to 4.19 stable.  All
+       these patches are related to ext4's bigalloc feature, which was
+       experimental as of 4.19 (expressly noted by contemporary
+       versions of e2fsprogs) and also suffered from a number of bugs.
+       A significant number of additional patches that were applied to
+       5.X kernels over time would have to be backported to 4.19 for
+       the patch below to function correctly. It's really not worth
+       doing that given bigalloc's experimental status as of 4.19 and
+       the very rare combination of the bigalloc and inline features.
+
+Link: https://lore.kernel.org/r/Y8mAe1SlcLD5fykg@debian-BULLSEYE-live-builder-AMD64
+Cc: Eric Whitney <enwlinux@gmail.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h           |    3 
+ fs/ext4/extents_status.c |  187 -----------------------------------------------
+ fs/ext4/extents_status.h |   51 ------------
+ fs/ext4/super.c          |    8 --
+ 4 files changed, 249 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1041,9 +1041,6 @@ struct ext4_inode_info {
+       ext4_lblk_t i_da_metadata_calc_last_lblock;
+       int i_da_metadata_calc_len;
+-      /* pending cluster reservations for bigalloc file systems */
+-      struct ext4_pending_tree i_pending_tree;
+-
+       /* on-disk additional length */
+       __u16 i_extra_isize;
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -142,7 +142,6 @@
+  */
+ static struct kmem_cache *ext4_es_cachep;
+-static struct kmem_cache *ext4_pending_cachep;
+ static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+@@ -1364,189 +1363,3 @@ static int es_reclaim_extents(struct ext
+       ei->i_es_tree.cache_es = NULL;
+       return nr_shrunk;
+ }
+-
+-#ifdef ES_DEBUG__
+-static void ext4_print_pending_tree(struct inode *inode)
+-{
+-      struct ext4_pending_tree *tree;
+-      struct rb_node *node;
+-      struct pending_reservation *pr;
+-
+-      printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
+-      tree = &EXT4_I(inode)->i_pending_tree;
+-      node = rb_first(&tree->root);
+-      while (node) {
+-              pr = rb_entry(node, struct pending_reservation, rb_node);
+-              printk(KERN_DEBUG " %u", pr->lclu);
+-              node = rb_next(node);
+-      }
+-      printk(KERN_DEBUG "\n");
+-}
+-#else
+-#define ext4_print_pending_tree(inode)
+-#endif
+-
+-int __init ext4_init_pending(void)
+-{
+-      ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
+-                                         sizeof(struct pending_reservation),
+-                                         0, (SLAB_RECLAIM_ACCOUNT), NULL);
+-      if (ext4_pending_cachep == NULL)
+-              return -ENOMEM;
+-      return 0;
+-}
+-
+-void ext4_exit_pending(void)
+-{
+-      kmem_cache_destroy(ext4_pending_cachep);
+-}
+-
+-void ext4_init_pending_tree(struct ext4_pending_tree *tree)
+-{
+-      tree->root = RB_ROOT;
+-}
+-
+-/*
+- * __get_pending - retrieve a pointer to a pending reservation
+- *
+- * @inode - file containing the pending cluster reservation
+- * @lclu - logical cluster of interest
+- *
+- * Returns a pointer to a pending reservation if it's a member of
+- * the set, and NULL if not.  Must be called holding i_es_lock.
+- */
+-static struct pending_reservation *__get_pending(struct inode *inode,
+-                                               ext4_lblk_t lclu)
+-{
+-      struct ext4_pending_tree *tree;
+-      struct rb_node *node;
+-      struct pending_reservation *pr = NULL;
+-
+-      tree = &EXT4_I(inode)->i_pending_tree;
+-      node = (&tree->root)->rb_node;
+-
+-      while (node) {
+-              pr = rb_entry(node, struct pending_reservation, rb_node);
+-              if (lclu < pr->lclu)
+-                      node = node->rb_left;
+-              else if (lclu > pr->lclu)
+-                      node = node->rb_right;
+-              else if (lclu == pr->lclu)
+-                      return pr;
+-      }
+-      return NULL;
+-}
+-
+-/*
+- * __insert_pending - adds a pending cluster reservation to the set of
+- *                    pending reservations
+- *
+- * @inode - file containing the cluster
+- * @lblk - logical block in the cluster to be added
+- *
+- * Returns 0 on successful insertion and -ENOMEM on failure.  If the
+- * pending reservation is already in the set, returns successfully.
+- */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+-      struct rb_node **p = &tree->root.rb_node;
+-      struct rb_node *parent = NULL;
+-      struct pending_reservation *pr;
+-      ext4_lblk_t lclu;
+-      int ret = 0;
+-
+-      lclu = EXT4_B2C(sbi, lblk);
+-      /* search to find parent for insertion */
+-      while (*p) {
+-              parent = *p;
+-              pr = rb_entry(parent, struct pending_reservation, rb_node);
+-
+-              if (lclu < pr->lclu) {
+-                      p = &(*p)->rb_left;
+-              } else if (lclu > pr->lclu) {
+-                      p = &(*p)->rb_right;
+-              } else {
+-                      /* pending reservation already inserted */
+-                      goto out;
+-              }
+-      }
+-
+-      pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+-      if (pr == NULL) {
+-              ret = -ENOMEM;
+-              goto out;
+-      }
+-      pr->lclu = lclu;
+-
+-      rb_link_node(&pr->rb_node, parent, p);
+-      rb_insert_color(&pr->rb_node, &tree->root);
+-
+-out:
+-      return ret;
+-}
+-
+-/*
+- * __remove_pending - removes a pending cluster reservation from the set
+- *                    of pending reservations
+- *
+- * @inode - file containing the cluster
+- * @lblk - logical block in the pending cluster reservation to be removed
+- *
+- * Returns successfully if pending reservation is not a member of the set.
+- */
+-static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      struct pending_reservation *pr;
+-      struct ext4_pending_tree *tree;
+-
+-      pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
+-      if (pr != NULL) {
+-              tree = &EXT4_I(inode)->i_pending_tree;
+-              rb_erase(&pr->rb_node, &tree->root);
+-              kmem_cache_free(ext4_pending_cachep, pr);
+-      }
+-}
+-
+-/*
+- * ext4_remove_pending - removes a pending cluster reservation from the set
+- *                       of pending reservations
+- *
+- * @inode - file containing the cluster
+- * @lblk - logical block in the pending cluster reservation to be removed
+- *
+- * Locking for external use of __remove_pending.
+- */
+-void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
+-{
+-      struct ext4_inode_info *ei = EXT4_I(inode);
+-
+-      write_lock(&ei->i_es_lock);
+-      __remove_pending(inode, lblk);
+-      write_unlock(&ei->i_es_lock);
+-}
+-
+-/*
+- * ext4_is_pending - determine whether a cluster has a pending reservation
+- *                   on it
+- *
+- * @inode - file containing the cluster
+- * @lblk - logical block in the cluster
+- *
+- * Returns true if there's a pending reservation for the cluster in the
+- * set of pending reservations, and false if not.
+- */
+-bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      struct ext4_inode_info *ei = EXT4_I(inode);
+-      bool ret;
+-
+-      read_lock(&ei->i_es_lock);
+-      ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
+-      read_unlock(&ei->i_es_lock);
+-
+-      return ret;
+-}
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -78,51 +78,6 @@ struct ext4_es_stats {
+       struct percpu_counter es_stats_shk_cnt;
+ };
+-/*
+- * Pending cluster reservations for bigalloc file systems
+- *
+- * A cluster with a pending reservation is a logical cluster shared by at
+- * least one extent in the extents status tree with delayed and unwritten
+- * status and at least one other written or unwritten extent.  The
+- * reservation is said to be pending because a cluster reservation would
+- * have to be taken in the event all blocks in the cluster shared with
+- * written or unwritten extents were deleted while the delayed and
+- * unwritten blocks remained.
+- *
+- * The set of pending cluster reservations is an auxiliary data structure
+- * used with the extents status tree to implement reserved cluster/block
+- * accounting for bigalloc file systems.  The set is kept in memory and
+- * records all pending cluster reservations.
+- *
+- * Its primary function is to avoid the need to read extents from the
+- * disk when invalidating pages as a result of a truncate, punch hole, or
+- * collapse range operation.  Page invalidation requires a decrease in the
+- * reserved cluster count if it results in the removal of all delayed
+- * and unwritten extents (blocks) from a cluster that is not shared with a
+- * written or unwritten extent, and no decrease otherwise.  Determining
+- * whether the cluster is shared can be done by searching for a pending
+- * reservation on it.
+- *
+- * Secondarily, it provides a potentially faster method for determining
+- * whether the reserved cluster count should be increased when a physical
+- * cluster is deallocated as a result of a truncate, punch hole, or
+- * collapse range operation.  The necessary information is also present
+- * in the extents status tree, but might be more rapidly accessed in
+- * the pending reservation set in many cases due to smaller size.
+- *
+- * The pending cluster reservation set is implemented as a red-black tree
+- * with the goal of minimizing per page search time overhead.
+- */
+-
+-struct pending_reservation {
+-      struct rb_node rb_node;
+-      ext4_lblk_t lclu;
+-};
+-
+-struct ext4_pending_tree {
+-      struct rb_root root;
+-};
+-
+ extern int __init ext4_init_es(void);
+ extern void ext4_exit_es(void);
+ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
+@@ -227,10 +182,4 @@ extern void ext4_es_unregister_shrinker(
+ extern int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v);
+-extern int __init ext4_init_pending(void);
+-extern void ext4_exit_pending(void);
+-extern void ext4_init_pending_tree(struct ext4_pending_tree *tree);
+-extern void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk);
+-extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
+-
+ #endif /* _EXT4_EXTENTS_STATUS_H */
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1095,7 +1095,6 @@ static struct inode *ext4_alloc_inode(st
+       ei->i_da_metadata_calc_len = 0;
+       ei->i_da_metadata_calc_last_lblock = 0;
+       spin_lock_init(&(ei->i_block_reservation_lock));
+-      ext4_init_pending_tree(&ei->i_pending_tree);
+ #ifdef CONFIG_QUOTA
+       ei->i_reserved_quota = 0;
+       memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
+@@ -6190,10 +6189,6 @@ static int __init ext4_init_fs(void)
+       if (err)
+               return err;
+-      err = ext4_init_pending();
+-      if (err)
+-              goto out6;
+-
+       err = ext4_init_pageio();
+       if (err)
+               goto out5;
+@@ -6232,8 +6227,6 @@ out3:
+ out4:
+       ext4_exit_pageio();
+ out5:
+-      ext4_exit_pending();
+-out6:
+       ext4_exit_es();
+       return err;
+@@ -6251,7 +6244,6 @@ static void __exit ext4_exit_fs(void)
+       ext4_exit_system_zone();
+       ext4_exit_pageio();
+       ext4_exit_es();
+-      ext4_exit_pending();
+ }
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/queue-4.19/revert-ext4-fix-delayed-allocation-bug-in-ext4_clu_mapped-for-bigalloc-inline.patch b/queue-4.19/revert-ext4-fix-delayed-allocation-bug-in-ext4_clu_mapped-for-bigalloc-inline.patch
new file mode 100644
index 0000000..ab07657
--- /dev/null
@@ -0,0 +1,49 @@
+From 0fc4d61400fcc65f5d079d9a26e65ed44597f849 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 22 Jan 2023 15:10:23 +0100
+Subject: Revert "ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 1ed1eef0551bebee8e56973ccd0900e3578edfb7 which is
+commit 131294c35ed6f777bd4e79d42af13b5c41bf2775 upstream.
+
+Eric writes:
+       I recommend not backporting this patch or the other three
+       patches apparently intended to support it to 4.19 stable.  All
+       these patches are related to ext4's bigalloc feature, which was
+       experimental as of 4.19 (expressly noted by contemporary
+       versions of e2fsprogs) and also suffered from a number of bugs.
+       A significant number of additional patches that were applied to
+       5.X kernels over time would have to be backported to 4.19 for
+       the patch below to function correctly. It's really not worth
+       doing that given bigalloc's experimental status as of 4.19 and
+       the very rare combination of the bigalloc and inline features.
+
+Link: https://lore.kernel.org/r/Y8mAe1SlcLD5fykg@debian-BULLSEYE-live-builder-AMD64
+Cc: Eric Whitney <enwlinux@gmail.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c |    8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5984,14 +5984,6 @@ int ext4_clu_mapped(struct inode *inode,
+       struct ext4_extent *extent;
+       ext4_lblk_t first_lblk, first_lclu, last_lclu;
+-      /*
+-       * if data can be stored inline, the logical cluster isn't
+-       * mapped - no physical clusters have been allocated, and the
+-       * file has no extents
+-       */
+-      if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+-              return 0;
+-
+       /* search for the extent closest to the first block in the cluster */
+       path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+       if (IS_ERR(path)) {
diff --git a/queue-4.19/revert-ext4-fix-reserved-cluster-accounting-at-delayed-write-time.patch b/queue-4.19/revert-ext4-fix-reserved-cluster-accounting-at-delayed-write-time.patch
new file mode 100644
index 0000000..b3e5647
--- /dev/null
@@ -0,0 +1,358 @@
+From 247f9d3697319a8b65d0af6ed185023a4c511a4f Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 22 Jan 2023 15:12:07 +0100
+Subject: Revert "ext4: fix reserved cluster accounting at delayed write time"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit d40e09f701cf7a44e595a558b067b2b4f67fbf87 which is
+commit 0b02f4c0d6d9e2c611dfbdd4317193e9dca740e6 upstream.
+
+Eric writes:
+       I recommend not backporting this patch or the other three
+       patches apparently intended to support it to 4.19 stable.  All
+       these patches are related to ext4's bigalloc feature, which was
+       experimental as of 4.19 (expressly noted by contemporary
+       versions of e2fsprogs) and also suffered from a number of bugs.
+       A significant number of additional patches that were applied to
+       5.X kernels over time would have to be backported to 4.19 for
+       the patch below to function correctly. It's really not worth
+       doing that given bigalloc's experimental status as of 4.19 and
+       the very rare combination of the bigalloc and inline features.
+
+Link: https://lore.kernel.org/r/Y8mAe1SlcLD5fykg@debian-BULLSEYE-live-builder-AMD64
+Cc: Eric Whitney <enwlinux@gmail.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h              |    1 
+ fs/ext4/extents.c           |   79 --------------------------------------------
+ fs/ext4/extents_status.c    |   53 -----------------------------
+ fs/ext4/extents_status.h    |   12 ------
+ fs/ext4/inode.c             |   79 ++++++++++----------------------------------
+ include/trace/events/ext4.h |   35 -------------------
+ 6 files changed, 18 insertions(+), 241 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3241,7 +3241,6 @@ extern int ext4_swap_extents(handle_t *h
+                               struct inode *inode2, ext4_lblk_t lblk1,
+                            ext4_lblk_t lblk2,  ext4_lblk_t count,
+                            int mark_unwritten,int *err);
+-extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
+ /* move_extent.c */
+ extern void ext4_double_down_write_data_sem(struct inode *first,
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5963,82 +5963,3 @@ ext4_swap_extents(handle_t *handle, stru
+       }
+       return replaced_count;
+ }
+-
+-/*
+- * ext4_clu_mapped - determine whether any block in a logical cluster has
+- *                   been mapped to a physical cluster
+- *
+- * @inode - file containing the logical cluster
+- * @lclu - logical cluster of interest
+- *
+- * Returns 1 if any block in the logical cluster is mapped, signifying
+- * that a physical cluster has been allocated for it.  Otherwise,
+- * returns 0.  Can also return negative error codes.  Derived from
+- * ext4_ext_map_blocks().
+- */
+-int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      struct ext4_ext_path *path;
+-      int depth, mapped = 0, err = 0;
+-      struct ext4_extent *extent;
+-      ext4_lblk_t first_lblk, first_lclu, last_lclu;
+-
+-      /* search for the extent closest to the first block in the cluster */
+-      path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+-      if (IS_ERR(path)) {
+-              err = PTR_ERR(path);
+-              path = NULL;
+-              goto out;
+-      }
+-
+-      depth = ext_depth(inode);
+-
+-      /*
+-       * A consistent leaf must not be empty.  This situation is possible,
+-       * though, _during_ tree modification, and it's why an assert can't
+-       * be put in ext4_find_extent().
+-       */
+-      if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+-              EXT4_ERROR_INODE(inode,
+-                  "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
+-                               (unsigned long) EXT4_C2B(sbi, lclu),
+-                               depth, path[depth].p_block);
+-              err = -EFSCORRUPTED;
+-              goto out;
+-      }
+-
+-      extent = path[depth].p_ext;
+-
+-      /* can't be mapped if the extent tree is empty */
+-      if (extent == NULL)
+-              goto out;
+-
+-      first_lblk = le32_to_cpu(extent->ee_block);
+-      first_lclu = EXT4_B2C(sbi, first_lblk);
+-
+-      /*
+-       * Three possible outcomes at this point - found extent spanning
+-       * the target cluster, to the left of the target cluster, or to the
+-       * right of the target cluster.  The first two cases are handled here.
+-       * The last case indicates the target cluster is not mapped.
+-       */
+-      if (lclu >= first_lclu) {
+-              last_lclu = EXT4_B2C(sbi, first_lblk +
+-                                   ext4_ext_get_actual_len(extent) - 1);
+-              if (lclu <= last_lclu) {
+-                      mapped = 1;
+-              } else {
+-                      first_lblk = ext4_ext_next_allocated_block(path);
+-                      first_lclu = EXT4_B2C(sbi, first_lblk);
+-                      if (lclu == first_lclu)
+-                              mapped = 1;
+-              }
+-      }
+-
+-out:
+-      ext4_ext_drop_refs(path);
+-      kfree(path);
+-
+-      return err ? err : mapped;
+-}
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1550,56 +1550,3 @@ bool ext4_is_pending(struct inode *inode
+       return ret;
+ }
+-
+-/*
+- * ext4_es_insert_delayed_block - adds a delayed block to the extents status
+- *                                tree, adding a pending reservation where
+- *                                needed
+- *
+- * @inode - file containing the newly added block
+- * @lblk - logical block to be added
+- * @allocated - indicates whether a physical cluster has been allocated for
+- *              the logical cluster that contains the block
+- *
+- * Returns 0 on success, negative error code on failure.
+- */
+-int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+-                               bool allocated)
+-{
+-      struct extent_status newes;
+-      int err = 0;
+-
+-      es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
+-               lblk, inode->i_ino);
+-
+-      newes.es_lblk = lblk;
+-      newes.es_len = 1;
+-      ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
+-      trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
+-
+-      ext4_es_insert_extent_check(inode, &newes);
+-
+-      write_lock(&EXT4_I(inode)->i_es_lock);
+-
+-      err = __es_remove_extent(inode, lblk, lblk);
+-      if (err != 0)
+-              goto error;
+-retry:
+-      err = __es_insert_extent(inode, &newes);
+-      if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+-                                        128, EXT4_I(inode)))
+-              goto retry;
+-      if (err != 0)
+-              goto error;
+-
+-      if (allocated)
+-              __insert_pending(inode, lblk);
+-
+-error:
+-      write_unlock(&EXT4_I(inode)->i_es_lock);
+-
+-      ext4_es_print_tree(inode);
+-      ext4_print_pending_tree(inode);
+-
+-      return err;
+-}
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -178,16 +178,6 @@ static inline int ext4_es_is_hole(struct
+       return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
+ }
+-static inline int ext4_es_is_mapped(struct extent_status *es)
+-{
+-      return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
+-}
+-
+-static inline int ext4_es_is_delonly(struct extent_status *es)
+-{
+-      return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
+-}
+-
+ static inline void ext4_es_set_referenced(struct extent_status *es)
+ {
+       es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
+@@ -242,7 +232,5 @@ extern void ext4_exit_pending(void);
+ extern void ext4_init_pending_tree(struct ext4_pending_tree *tree);
+ extern void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk);
+ extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
+-extern int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+-                                      bool allocated);
+ #endif /* _EXT4_EXTENTS_STATUS_H */
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1824,65 +1824,6 @@ static int ext4_bh_delay_or_unwritten(ha
+ }
+ /*
+- * ext4_insert_delayed_block - adds a delayed block to the extents status
+- *                             tree, incrementing the reserved cluster/block
+- *                             count or making a pending reservation
+- *                             where needed
+- *
+- * @inode - file containing the newly added block
+- * @lblk - logical block to be added
+- *
+- * Returns 0 on success, negative error code on failure.
+- */
+-static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      int ret;
+-      bool allocated = false;
+-
+-      /*
+-       * If the cluster containing lblk is shared with a delayed,
+-       * written, or unwritten extent in a bigalloc file system, it's
+-       * already been accounted for and does not need to be reserved.
+-       * A pending reservation must be made for the cluster if it's
+-       * shared with a written or unwritten extent and doesn't already
+-       * have one.  Written and unwritten extents can be purged from the
+-       * extents status tree if the system is under memory pressure, so
+-       * it's necessary to examine the extent tree if a search of the
+-       * extents status tree doesn't get a match.
+-       */
+-      if (sbi->s_cluster_ratio == 1) {
+-              ret = ext4_da_reserve_space(inode);
+-              if (ret != 0)   /* ENOSPC */
+-                      goto errout;
+-      } else {   /* bigalloc */
+-              if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
+-                      if (!ext4_es_scan_clu(inode,
+-                                            &ext4_es_is_mapped, lblk)) {
+-                              ret = ext4_clu_mapped(inode,
+-                                                    EXT4_B2C(sbi, lblk));
+-                              if (ret < 0)
+-                                      goto errout;
+-                              if (ret == 0) {
+-                                      ret = ext4_da_reserve_space(inode);
+-                                      if (ret != 0)   /* ENOSPC */
+-                                              goto errout;
+-                              } else {
+-                                      allocated = true;
+-                              }
+-                      } else {
+-                              allocated = true;
+-                      }
+-              }
+-      }
+-
+-      ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+-
+-errout:
+-      return ret;
+-}
+-
+-/*
+  * This function is grabs code from the very beginning of
+  * ext4_map_blocks, but assumes that the caller is from delayed write
+  * time. This function looks up the requested blocks and sets the
+@@ -1966,9 +1907,25 @@ add_delayed:
+                * XXX: __block_prepare_write() unmaps passed block,
+                * is it OK?
+                */
++              /*
++               * If the block was allocated from previously allocated cluster,
++               * then we don't need to reserve it again. However we still need
++               * to reserve metadata for every block we're going to write.
++               */
++              if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
++                  !ext4_es_scan_clu(inode,
++                                    &ext4_es_is_delayed, map->m_lblk)) {
++                      ret = ext4_da_reserve_space(inode);
++                      if (ret) {
++                              /* not enough space to reserve */
++                              retval = ret;
++                              goto out_unlock;
++                      }
++              }
+-              ret = ext4_insert_delayed_block(inode, map->m_lblk);
+-              if (ret != 0) {
++              ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++                                          ~0, EXTENT_STATUS_DELAYED);
++              if (ret) {
+                       retval = ret;
+                       goto out_unlock;
+               }
+--- a/include/trace/events/ext4.h
++++ b/include/trace/events/ext4.h
+@@ -2532,41 +2532,6 @@ TRACE_EVENT(ext4_es_shrink,
+                 __entry->scan_time, __entry->nr_skipped, __entry->retried)
+ );
+-TRACE_EVENT(ext4_es_insert_delayed_block,
+-      TP_PROTO(struct inode *inode, struct extent_status *es,
+-               bool allocated),
+-
+-      TP_ARGS(inode, es, allocated),
+-
+-      TP_STRUCT__entry(
+-              __field(        dev_t,          dev             )
+-              __field(        ino_t,          ino             )
+-              __field(        ext4_lblk_t,    lblk            )
+-              __field(        ext4_lblk_t,    len             )
+-              __field(        ext4_fsblk_t,   pblk            )
+-              __field(        char,           status          )
+-              __field(        bool,           allocated       )
+-      ),
+-
+-      TP_fast_assign(
+-              __entry->dev            = inode->i_sb->s_dev;
+-              __entry->ino            = inode->i_ino;
+-              __entry->lblk           = es->es_lblk;
+-              __entry->len            = es->es_len;
+-              __entry->pblk           = ext4_es_pblock(es);
+-              __entry->status         = ext4_es_status(es);
+-              __entry->allocated      = allocated;
+-      ),
+-
+-      TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s "
+-                "allocated %d",
+-                MAJOR(__entry->dev), MINOR(__entry->dev),
+-                (unsigned long) __entry->ino,
+-                __entry->lblk, __entry->len,
+-                __entry->pblk, show_extent_status(__entry->status),
+-                __entry->allocated)
+-);
+-
+ /* fsmap traces */
+ DECLARE_EVENT_CLASS(ext4_fsmap_class,
+       TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len,
diff --git a/queue-4.19/revert-ext4-generalize-extents-status-tree-search-functions.patch b/queue-4.19/revert-ext4-generalize-extents-status-tree-search-functions.patch
new file mode 100644
index 0000000..7f89bc5
--- /dev/null
@@ -0,0 +1,445 @@
+From 80aab1eb27a8b4ca316b1914967a159e77582190 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 22 Jan 2023 15:13:05 +0100
+Subject: Revert "ext4: generalize extents status tree search functions"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit cca8671f3a7f5775a078f2676f6d1039afb925e6 which is
+commit ad431025aecda85d3ebef5e4a3aca5c1c681d0c7 upstream.
+
+Eric writes:
+       I recommend not backporting this patch or the other three
+       patches apparently intended to support it to 4.19 stable.  All
+       these patches are related to ext4's bigalloc feature, which was
+       experimental as of 4.19 (expressly noted by contemporary
+       versions of e2fsprogs) and also suffered from a number of bugs.
+       A significant number of additional patches that were applied to
+       5.X kernels over time would have to be backported to 4.19 for
+       the patch below to function correctly. It's really not worth
+       doing that given bigalloc's experimental status as of 4.19 and
+       the very rare combination of the bigalloc and inline features.
+
+Link: https://lore.kernel.org/r/Y8mAe1SlcLD5fykg@debian-BULLSEYE-live-builder-AMD64
+Cc: Eric Whitney <enwlinux@gmail.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h              |    4 +
+ fs/ext4/extents.c           |   52 +++++++++++----
+ fs/ext4/extents_status.c    |  151 +++++---------------------------------------
+ fs/ext4/extents_status.h    |   13 ---
+ fs/ext4/inode.c             |   17 ++--
+ include/trace/events/ext4.h |    4 -
+ 6 files changed, 75 insertions(+), 166 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3228,6 +3228,10 @@ extern struct ext4_ext_path *ext4_find_e
+                                             int flags);
+ extern void ext4_ext_drop_refs(struct ext4_ext_path *);
+ extern int ext4_ext_check_inode(struct inode *inode);
++extern int ext4_find_delalloc_range(struct inode *inode,
++                                  ext4_lblk_t lblk_start,
++                                  ext4_lblk_t lblk_end);
++extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
+ extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
+ extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+                       __u64 start, __u64 len);
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2381,8 +2381,8 @@ ext4_ext_put_gap_in_cache(struct inode *
+ {
+       struct extent_status es;
+-      ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+-                                hole_start + hole_len - 1, &es);
++      ext4_es_find_delayed_extent_range(inode, hole_start,
++                                        hole_start + hole_len - 1, &es);
+       if (es.es_len) {
+               /* There's delayed extent containing lblock? */
+               if (es.es_lblk <= hole_start)
+@@ -3853,6 +3853,39 @@ out:
+ }
+ /**
++ * ext4_find_delalloc_range: find delayed allocated block in the given range.
++ *
++ * Return 1 if there is a delalloc block in the range, otherwise 0.
++ */
++int ext4_find_delalloc_range(struct inode *inode,
++                           ext4_lblk_t lblk_start,
++                           ext4_lblk_t lblk_end)
++{
++      struct extent_status es;
++
++      ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
++      if (es.es_len == 0)
++              return 0; /* there is no delay extent in this tree */
++      else if (es.es_lblk <= lblk_start &&
++               lblk_start < es.es_lblk + es.es_len)
++              return 1;
++      else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
++              return 1;
++      else
++              return 0;
++}
++
++int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++      ext4_lblk_t lblk_start, lblk_end;
++      lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
++      lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
++
++      return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
++}
++
++/**
+  * Determines how many complete clusters (out of those specified by the 'map')
+  * are under delalloc and were reserved quota for.
+  * This function is called when we are writing out the blocks that were
+@@ -3910,8 +3943,7 @@ get_reserved_cluster_alloc(struct inode
+               lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
+               lblk_to = lblk_from + c_offset - 1;
+-              if (ext4_es_scan_range(inode, &ext4_es_is_delayed, lblk_from,
+-                                     lblk_to))
++              if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
+                       allocated_clusters--;
+       }
+@@ -3921,8 +3953,7 @@ get_reserved_cluster_alloc(struct inode
+               lblk_from = lblk_start + num_blks;
+               lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
+-              if (ext4_es_scan_range(inode, &ext4_es_is_delayed, lblk_from,
+-                                     lblk_to))
++              if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
+                       allocated_clusters--;
+       }
+@@ -5077,10 +5108,8 @@ static int ext4_find_delayed_extent(stru
+       ext4_lblk_t block, next_del;
+       if (newes->es_pblk == 0) {
+-              ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+-                                        newes->es_lblk,
+-                                        newes->es_lblk + newes->es_len - 1,
+-                                        &es);
++              ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
++                              newes->es_lblk + newes->es_len - 1, &es);
+               /*
+                * No extent in extent-tree contains block @newes->es_pblk,
+@@ -5101,8 +5130,7 @@ static int ext4_find_delayed_extent(stru
+       }
+       block = newes->es_lblk + newes->es_len;
+-      ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
+-                                EXT_MAX_BLOCKS, &es);
++      ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
+       if (es.es_len == 0)
+               next_del = EXT_MAX_BLOCKS;
+       else
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -233,38 +233,30 @@ static struct extent_status *__es_tree_s
+ }
+ /*
+- * ext4_es_find_extent_range - find extent with specified status within block
+- *                             range or next extent following block range in
+- *                             extents status tree
++ * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
++ * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
+  *
+- * @inode - file containing the range
+- * @matching_fn - pointer to function that matches extents with desired status
+- * @lblk - logical block defining start of range
+- * @end - logical block defining end of range
+- * @es - extent found, if any
+- *
+- * Find the first extent within the block range specified by @lblk and @end
+- * in the extents status tree that satisfies @matching_fn.  If a match
+- * is found, it's returned in @es.  If not, and a matching extent is found
+- * beyond the block range, it's returned in @es.  If no match is found, an
+- * extent is returned in @es whose es_lblk, es_len, and es_pblk components
+- * are 0.
+- */
+-static void __es_find_extent_range(struct inode *inode,
+-                                 int (*matching_fn)(struct extent_status *es),
+-                                 ext4_lblk_t lblk, ext4_lblk_t end,
+-                                 struct extent_status *es)
++ * @inode: the inode which owns delayed extents
++ * @lblk: the offset where we start to search
++ * @end: the offset where we stop to search
++ * @es: delayed extent that we found
++ */
++void ext4_es_find_delayed_extent_range(struct inode *inode,
++                               ext4_lblk_t lblk, ext4_lblk_t end,
++                               struct extent_status *es)
+ {
+       struct ext4_es_tree *tree = NULL;
+       struct extent_status *es1 = NULL;
+       struct rb_node *node;
+-      WARN_ON(es == NULL);
+-      WARN_ON(end < lblk);
++      BUG_ON(es == NULL);
++      BUG_ON(end < lblk);
++      trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
++      read_lock(&EXT4_I(inode)->i_es_lock);
+       tree = &EXT4_I(inode)->i_es_tree;
+-      /* see if the extent has been cached */
++      /* find extent in cache firstly */
+       es->es_lblk = es->es_len = es->es_pblk = 0;
+       if (tree->cache_es) {
+               es1 = tree->cache_es;
+@@ -279,133 +271,28 @@ static void __es_find_extent_range(struc
+       es1 = __es_tree_search(&tree->root, lblk);
+ out:
+-      if (es1 && !matching_fn(es1)) {
++      if (es1 && !ext4_es_is_delayed(es1)) {
+               while ((node = rb_next(&es1->rb_node)) != NULL) {
+                       es1 = rb_entry(node, struct extent_status, rb_node);
+                       if (es1->es_lblk > end) {
+                               es1 = NULL;
+                               break;
+                       }
+-                      if (matching_fn(es1))
++                      if (ext4_es_is_delayed(es1))
+                               break;
+               }
+       }
+-      if (es1 && matching_fn(es1)) {
++      if (es1 && ext4_es_is_delayed(es1)) {
+               tree->cache_es = es1;
+               es->es_lblk = es1->es_lblk;
+               es->es_len = es1->es_len;
+               es->es_pblk = es1->es_pblk;
+       }
+-}
+-
+-/*
+- * Locking for __es_find_extent_range() for external use
+- */
+-void ext4_es_find_extent_range(struct inode *inode,
+-                             int (*matching_fn)(struct extent_status *es),
+-                             ext4_lblk_t lblk, ext4_lblk_t end,
+-                             struct extent_status *es)
+-{
+-      trace_ext4_es_find_extent_range_enter(inode, lblk);
+-
+-      read_lock(&EXT4_I(inode)->i_es_lock);
+-      __es_find_extent_range(inode, matching_fn, lblk, end, es);
+-      read_unlock(&EXT4_I(inode)->i_es_lock);
+-
+-      trace_ext4_es_find_extent_range_exit(inode, es);
+-}
+-
+-/*
+- * __es_scan_range - search block range for block with specified status
+- *                   in extents status tree
+- *
+- * @inode - file containing the range
+- * @matching_fn - pointer to function that matches extents with desired status
+- * @lblk - logical block defining start of range
+- * @end - logical block defining end of range
+- *
+- * Returns true if at least one block in the specified block range satisfies
+- * the criterion specified by @matching_fn, and false if not.  If at least
+- * one extent has the specified status, then there is at least one block
+- * in the cluster with that status.  Should only be called by code that has
+- * taken i_es_lock.
+- */
+-static bool __es_scan_range(struct inode *inode,
+-                          int (*matching_fn)(struct extent_status *es),
+-                          ext4_lblk_t start, ext4_lblk_t end)
+-{
+-      struct extent_status es;
+-
+-      __es_find_extent_range(inode, matching_fn, start, end, &es);
+-      if (es.es_len == 0)
+-              return false;   /* no matching extent in the tree */
+-      else if (es.es_lblk <= start &&
+-               start < es.es_lblk + es.es_len)
+-              return true;
+-      else if (start <= es.es_lblk && es.es_lblk <= end)
+-              return true;
+-      else
+-              return false;
+-}
+-/*
+- * Locking for __es_scan_range() for external use
+- */
+-bool ext4_es_scan_range(struct inode *inode,
+-                      int (*matching_fn)(struct extent_status *es),
+-                      ext4_lblk_t lblk, ext4_lblk_t end)
+-{
+-      bool ret;
+-
+-      read_lock(&EXT4_I(inode)->i_es_lock);
+-      ret = __es_scan_range(inode, matching_fn, lblk, end);
+-      read_unlock(&EXT4_I(inode)->i_es_lock);
+-
+-      return ret;
+-}
+-
+-/*
+- * __es_scan_clu - search cluster for block with specified status in
+- *                 extents status tree
+- *
+- * @inode - file containing the cluster
+- * @matching_fn - pointer to function that matches extents with desired status
+- * @lblk - logical block in cluster to be searched
+- *
+- * Returns true if at least one extent in the cluster containing @lblk
+- * satisfies the criterion specified by @matching_fn, and false if not.  If at
+- * least one extent has the specified status, then there is at least one block
+- * in the cluster with that status.  Should only be called by code that has
+- * taken i_es_lock.
+- */
+-static bool __es_scan_clu(struct inode *inode,
+-                        int (*matching_fn)(struct extent_status *es),
+-                        ext4_lblk_t lblk)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      ext4_lblk_t lblk_start, lblk_end;
+-
+-      lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+-      lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+-
+-      return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
+-}
+-
+-/*
+- * Locking for __es_scan_clu() for external use
+- */
+-bool ext4_es_scan_clu(struct inode *inode,
+-                    int (*matching_fn)(struct extent_status *es),
+-                    ext4_lblk_t lblk)
+-{
+-      bool ret;
+-
+-      read_lock(&EXT4_I(inode)->i_es_lock);
+-      ret = __es_scan_clu(inode, matching_fn, lblk);
+       read_unlock(&EXT4_I(inode)->i_es_lock);
+-      return ret;
++      trace_ext4_es_find_delayed_extent_range_exit(inode, es);
+ }
+ static void ext4_es_list_add(struct inode *inode)
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -90,18 +90,11 @@ extern void ext4_es_cache_extent(struct
+                                unsigned int status);
+ extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+                                ext4_lblk_t len);
+-extern void ext4_es_find_extent_range(struct inode *inode,
+-                                    int (*match_fn)(struct extent_status *es),
+-                                    ext4_lblk_t lblk, ext4_lblk_t end,
+-                                    struct extent_status *es);
++extern void ext4_es_find_delayed_extent_range(struct inode *inode,
++                                      ext4_lblk_t lblk, ext4_lblk_t end,
++                                      struct extent_status *es);
+ extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+                                struct extent_status *es);
+-extern bool ext4_es_scan_range(struct inode *inode,
+-                             int (*matching_fn)(struct extent_status *es),
+-                             ext4_lblk_t lblk, ext4_lblk_t end);
+-extern bool ext4_es_scan_clu(struct inode *inode,
+-                           int (*matching_fn)(struct extent_status *es),
+-                           ext4_lblk_t lblk);
+ static inline unsigned int ext4_es_status(struct extent_status *es)
+ {
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -600,8 +600,8 @@ int ext4_map_blocks(handle_t *handle, st
+                               EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+               if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   !(status & EXTENT_STATUS_WRITTEN) &&
+-                  ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+-                                     map->m_lblk + map->m_len - 1))
++                  ext4_find_delalloc_range(inode, map->m_lblk,
++                                           map->m_lblk + map->m_len - 1))
+                       status |= EXTENT_STATUS_DELAYED;
+               ret = ext4_es_insert_extent(inode, map->m_lblk,
+                                           map->m_len, map->m_pblk, status);
+@@ -724,8 +724,8 @@ found:
+                               EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+               if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   !(status & EXTENT_STATUS_WRITTEN) &&
+-                  ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+-                                     map->m_lblk + map->m_len - 1))
++                  ext4_find_delalloc_range(inode, map->m_lblk,
++                                           map->m_lblk + map->m_len - 1))
+                       status |= EXTENT_STATUS_DELAYED;
+               ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+                                           map->m_pblk, status);
+@@ -1717,7 +1717,7 @@ static void ext4_da_page_release_reserva
+               lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
+                       ((num_clusters - 1) << sbi->s_cluster_bits);
+               if (sbi->s_cluster_ratio == 1 ||
+-                  !ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
++                  !ext4_find_delalloc_cluster(inode, lblk))
+                       ext4_da_release_space(inode, 1);
+               num_clusters--;
+@@ -1902,7 +1902,6 @@ static int ext4_da_map_blocks(struct ino
+ add_delayed:
+       if (retval == 0) {
+               int ret;
+-
+               /*
+                * XXX: __block_prepare_write() unmaps passed block,
+                * is it OK?
+@@ -1913,8 +1912,7 @@ add_delayed:
+                * to reserve metadata for every block we're going to write.
+                */
+               if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
+-                  !ext4_es_scan_clu(inode,
+-                                    &ext4_es_is_delayed, map->m_lblk)) {
++                  !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
+                       ret = ext4_da_reserve_space(inode);
+                       if (ret) {
+                               /* not enough space to reserve */
+@@ -3521,8 +3519,7 @@ static int ext4_iomap_begin(struct inode
+                       ext4_lblk_t end = map.m_lblk + map.m_len - 1;
+                       struct extent_status es;
+-                      ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+-                                                map.m_lblk, end, &es);
++                      ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es);
+                       if (!es.es_len || es.es_lblk > end) {
+                               /* entire range is a hole */
+--- a/include/trace/events/ext4.h
++++ b/include/trace/events/ext4.h
+@@ -2290,7 +2290,7 @@ TRACE_EVENT(ext4_es_remove_extent,
+                 __entry->lblk, __entry->len)
+ );
+-TRACE_EVENT(ext4_es_find_extent_range_enter,
++TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
+       TP_ARGS(inode, lblk),
+@@ -2312,7 +2312,7 @@ TRACE_EVENT(ext4_es_find_extent_range_en
+                 (unsigned long) __entry->ino, __entry->lblk)
+ );
+-TRACE_EVENT(ext4_es_find_extent_range_exit,
++TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
+       TP_PROTO(struct inode *inode, struct extent_status *es),
+       TP_ARGS(inode, es),
diff --git a/queue-4.19/series b/queue-4.19/series
index 0830ec637b4f2c189b4bbd51678918e575a2c925..a966c94e51fbc537452048b32199e942af4874fe 100644
@@ -30,3 +30,8 @@ usb-storage-apply-ignore_uas-only-for-hiksemi-md202-on-rtl9210.patch
 serial-pch_uart-pass-correct-sg-to-dma_unmap_sg.patch
 serial-atmel-fix-incorrect-baudrate-setup.patch
 gsmi-fix-null-deref-in-gsmi_get_variable.patch
+revert-ext4-fix-delayed-allocation-bug-in-ext4_clu_mapped-for-bigalloc-inline.patch
+revert-ext4-fix-reserved-cluster-accounting-at-delayed-write-time.patch
+revert-ext4-add-new-pending-reservation-mechanism.patch
+revert-ext4-generalize-extents-status-tree-search-functions.patch
+x86-fpu-use-_alignof-to-avoid-undefined-behavior-in-type_align.patch
diff --git a/queue-4.19/x86-fpu-use-_alignof-to-avoid-undefined-behavior-in-type_align.patch b/queue-4.19/x86-fpu-use-_alignof-to-avoid-undefined-behavior-in-type_align.patch
new file mode 100644
index 0000000..dbf60bb
--- /dev/null
@@ -0,0 +1,63 @@
+From 56f2f15b0ea75dfe5ab5f304ab7c4972162e0ea1 Mon Sep 17 00:00:00 2001
+From: YingChi Long <me@inclyc.cn>
+Date: Fri, 18 Nov 2022 08:55:35 +0800
+Subject: x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
+
+From: YingChi Long <me@inclyc.cn>
+
+commit 55228db2697c09abddcb9487c3d9fa5854a932cd upstream.
+
+WG14 N2350 specifies that it is an undefined behavior to have type
+definitions within "offsetof", see
+
+  https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2350.htm
+
+This specification is also part of C23.
+
+Therefore, replace the TYPE_ALIGN macro with the _Alignof builtin to
+avoid undefined behavior. (_Alignof itself is C11 and the kernel is
+built with -gnu11).
+
+ISO C11 _Alignof is subtly different from the GNU C extension
+__alignof__. Latter is the preferred alignment and _Alignof the
+minimal alignment. For long long on x86 these are 8 and 4
+respectively.
+
+The macro TYPE_ALIGN's behavior matches _Alignof rather than
+__alignof__.
+
+  [ bp: Massage commit message. ]
+
+Signed-off-by: YingChi Long <me@inclyc.cn>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Link: https://lore.kernel.org/r/20220925153151.2467884-1-me@inclyc.cn
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/init.c |    7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -138,9 +138,6 @@ static void __init fpu__init_system_gene
+ unsigned int fpu_kernel_xstate_size;
+ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
+-/* Get alignment of the TYPE. */
+-#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
+-
+ /*
+  * Enforce that 'MEMBER' is the last field of 'TYPE'.
+  *
+@@ -148,8 +145,8 @@ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size
+  * because that's how C aligns structs.
+  */
+ #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+-      BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
+-                                         TYPE_ALIGN(TYPE)))
++      BUILD_BUG_ON(sizeof(TYPE) !=         \
++                   ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
+ /*
+  * We append the 'struct fpu' to the task_struct: