git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.5
author    Sasha Levin <sashal@kernel.org>  Sat, 23 Sep 2023 12:16:07 +0000 (08:16 -0400)
committer Sasha Levin <sashal@kernel.org>  Sat, 23 Sep 2023 12:16:07 +0000 (08:16 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
19 files changed:
queue-6.5/btrfs-improve-error-message-after-failure-to-add-del.patch [new file with mode: 0644]
queue-6.5/btrfs-remove-bug-after-failure-to-insert-delayed-dir.patch [new file with mode: 0644]
queue-6.5/ext4-do-not-let-fstrim-block-system-suspend.patch [new file with mode: 0644]
queue-6.5/ext4-move-setting-of-trimmed-bit-into-ext4_try_to_tr.patch [new file with mode: 0644]
queue-6.5/ext4-replace-the-traditional-ternary-conditional-ope.patch [new file with mode: 0644]
queue-6.5/gfs2-fix-another-freeze-thaw-hang.patch [new file with mode: 0644]
queue-6.5/media-v4l-use-correct-dependency-for-camera-sensor-d.patch [new file with mode: 0644]
queue-6.5/media-via-use-correct-dependency-for-camera-sensor-d.patch [new file with mode: 0644]
queue-6.5/netfs-only-call-folio_start_fscache-one-time-for-eac.patch [new file with mode: 0644]
queue-6.5/nfs-fix-error-handling-for-o_direct-write-scheduling.patch [new file with mode: 0644]
queue-6.5/nfs-fix-o_direct-locking-issues.patch [new file with mode: 0644]
queue-6.5/nfs-more-fixes-for-nfs_direct_write_reschedule_io.patch [new file with mode: 0644]
queue-6.5/nfs-more-o_direct-accounting-fixes-for-error-paths.patch [new file with mode: 0644]
queue-6.5/nfs-pnfs-report-einval-errors-from-connect-to-the-se.patch [new file with mode: 0644]
queue-6.5/nfs-use-the-correct-commit-info-in-nfs_join_page_gro.patch [new file with mode: 0644]
queue-6.5/nfsv4.1-fix-pnfs-mds-ds-session-trunking.patch [new file with mode: 0644]
queue-6.5/nfsv4.1-use-exchgid4_flag_use_pnfs_ds-for-ds-server.patch [new file with mode: 0644]
queue-6.5/series [new file with mode: 0644]
queue-6.5/sunrpc-mark-the-cred-for-revalidation-if-the-server-.patch [new file with mode: 0644]

diff --git a/queue-6.5/btrfs-improve-error-message-after-failure-to-add-del.patch b/queue-6.5/btrfs-improve-error-message-after-failure-to-add-del.patch
new file mode 100644 (file)
index 0000000..652b1bd
--- /dev/null
@@ -0,0 +1,54 @@
+From a8a34fc728b80c2adf2b7f92f03bbc0f4deed210 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Aug 2023 09:06:42 +0100
+Subject: btrfs: improve error message after failure to add delayed dir index
+ item
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 91bfe3104b8db0310f76f2dcb6aacef24c889366 ]
+
+If we fail to add a delayed dir index item because there's already another
+item with the same index number, we print an error message (and then BUG).
+However, that message isn't very helpful for debugging because we don't
+know what the index number is or what the values of the index counters are
+in the inode and its delayed inode (the index_cnt fields of struct
+btrfs_inode and struct btrfs_delayed_node).
+
+So update the error message to include the index number and counters.
+
+We actually had a recent case where this issue was hit by a syzbot report
+(see the link below).
+
+Link: https://lore.kernel.org/linux-btrfs/00000000000036e1290603e097e0@google.com/
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 2c58c3931ede ("btrfs: remove BUG() after failure to insert delayed dir index item")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-inode.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 0f147240ce9b8..1a050e48e0e57 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1511,9 +1511,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+       ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+       if (unlikely(ret)) {
+               btrfs_err(trans->fs_info,
+-                        "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+-                        name_len, name, delayed_node->root->root_key.objectid,
+-                        delayed_node->inode_id, ret);
++"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
++                        name_len, name, index, btrfs_root_id(delayed_node->root),
++                        delayed_node->inode_id, dir->index_cnt,
++                        delayed_node->index_cnt, ret);
+               BUG();
+       }
+       mutex_unlock(&delayed_node->mutex);
+-- 
+2.40.1
+
diff --git a/queue-6.5/btrfs-remove-bug-after-failure-to-insert-delayed-dir.patch b/queue-6.5/btrfs-remove-bug-after-failure-to-insert-delayed-dir.patch
new file mode 100644 (file)
index 0000000..a0ae4b3
--- /dev/null
@@ -0,0 +1,140 @@
+From 6f5873c15290d71b7a94140f2350b510a6c88e7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Aug 2023 09:06:43 +0100
+Subject: btrfs: remove BUG() after failure to insert delayed dir index item
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 2c58c3931ede7cd08cbecf1f1a4acaf0a04a41a9 ]
+
+Instead of calling BUG() when we fail to insert a delayed dir index item
+into the delayed node's tree, we can just release all the resources we
+have allocated/acquired before and return the error to the caller. This is
+fine because all existing call chains undo anything they have done before
+calling btrfs_insert_delayed_dir_index() or BUG_ON (when creating pending
+snapshots in the transaction commit path).
+
+So remove the BUG() call and do proper error handling.
+
+This relates to a syzbot report linked below, but does not fix it: it only
+prevents hitting a BUG(); it does not fix the underlying issue where we
+somehow attempt to use the same index number twice for different index items.
+
+Link: https://lore.kernel.org/linux-btrfs/00000000000036e1290603e097e0@google.com/
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-inode.c | 74 +++++++++++++++++++++++++---------------
+ 1 file changed, 47 insertions(+), 27 deletions(-)
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 1a050e48e0e57..b5f684cb4e749 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1426,7 +1426,29 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
+       btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
+ }
+-/* Will return 0 or -ENOMEM */
++static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
++{
++      struct btrfs_fs_info *fs_info = trans->fs_info;
++      const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
++
++      if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
++              return;
++
++      /*
++       * Adding the new dir index item does not require touching another
++       * leaf, so we can release 1 unit of metadata that was previously
++       * reserved when starting the transaction. This applies only to
++       * the case where we had a transaction start and excludes the
++       * transaction join case (when replaying log trees).
++       */
++      trace_btrfs_space_reservation(fs_info, "transaction",
++                                    trans->transid, bytes, 0);
++      btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
++      ASSERT(trans->bytes_reserved >= bytes);
++      trans->bytes_reserved -= bytes;
++}
++
++/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
+ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  const char *name, int name_len,
+                                  struct btrfs_inode *dir,
+@@ -1468,6 +1490,27 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+       mutex_lock(&delayed_node->mutex);
++      /*
++       * First attempt to insert the delayed item. This is to make the error
++       * handling path simpler in case we fail (-EEXIST). There's no risk of
++       * any other task coming in and running the delayed item before we do
++       * the metadata space reservation below, because we are holding the
++       * delayed node's mutex and that mutex must also be locked before the
++       * node's delayed items can be run.
++       */
++      ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
++      if (unlikely(ret)) {
++              btrfs_err(trans->fs_info,
++"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
++                        name_len, name, index, btrfs_root_id(delayed_node->root),
++                        delayed_node->inode_id, dir->index_cnt,
++                        delayed_node->index_cnt, ret);
++              btrfs_release_delayed_item(delayed_item);
++              btrfs_release_dir_index_item_space(trans);
++              mutex_unlock(&delayed_node->mutex);
++              goto release_node;
++      }
++
+       if (delayed_node->index_item_leaves == 0 ||
+           delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
+               delayed_node->curr_index_batch_size = data_len;
+@@ -1485,37 +1528,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                * impossible.
+                */
+               if (WARN_ON(ret)) {
+-                      mutex_unlock(&delayed_node->mutex);
+                       btrfs_release_delayed_item(delayed_item);
++                      mutex_unlock(&delayed_node->mutex);
+                       goto release_node;
+               }
+               delayed_node->index_item_leaves++;
+-      } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
+-              const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+-
+-              /*
+-               * Adding the new dir index item does not require touching another
+-               * leaf, so we can release 1 unit of metadata that was previously
+-               * reserved when starting the transaction. This applies only to
+-               * the case where we had a transaction start and excludes the
+-               * transaction join case (when replaying log trees).
+-               */
+-              trace_btrfs_space_reservation(fs_info, "transaction",
+-                                            trans->transid, bytes, 0);
+-              btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+-              ASSERT(trans->bytes_reserved >= bytes);
+-              trans->bytes_reserved -= bytes;
+-      }
+-
+-      ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+-      if (unlikely(ret)) {
+-              btrfs_err(trans->fs_info,
+-"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
+-                        name_len, name, index, btrfs_root_id(delayed_node->root),
+-                        delayed_node->inode_id, dir->index_cnt,
+-                        delayed_node->index_cnt, ret);
+-              BUG();
++      } else {
++              btrfs_release_dir_index_item_space(trans);
+       }
+       mutex_unlock(&delayed_node->mutex);
+-- 
+2.40.1
+
diff --git a/queue-6.5/ext4-do-not-let-fstrim-block-system-suspend.patch b/queue-6.5/ext4-do-not-let-fstrim-block-system-suspend.patch
new file mode 100644 (file)
index 0000000..5691c9b
--- /dev/null
@@ -0,0 +1,76 @@
+From fccd1df1ea6603df9176017cddc7da3e63e5040e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 17:04:55 +0200
+Subject: ext4: do not let fstrim block system suspend
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 5229a658f6453362fbb9da6bf96872ef25a7097e ]
+
+Len Brown has reported that system suspend sometimes fails due to an
+inability to freeze a task working in ext4_trim_fs() for one minute.
+Trimming a large filesystem on a disk that slowly processes discard
+requests can indeed take a long time. Since discard is just an advisory
+call, it is perfectly fine to interrupt it at any time and return the
+number of blocks discarded up to that moment. Do that when we detect
+that the task is being frozen.
+
+Cc: stable@kernel.org
+Reported-by: Len Brown <lenb@kernel.org>
+Suggested-by: Dave Chinner <david@fromorbit.com>
+References: https://bugzilla.kernel.org/show_bug.cgi?id=216322
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230913150504.9054-2-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 63dde4f5f984f..3711be697a0a5 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/nospec.h>
+ #include <linux/backing-dev.h>
++#include <linux/freezer.h>
+ #include <trace/events/ext4.h>
+ /*
+@@ -6930,6 +6931,11 @@ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+                                       EXT4_CLUSTER_BITS(sb);
+ }
++static bool ext4_trim_interrupted(void)
++{
++      return fatal_signal_pending(current) || freezing(current);
++}
++
+ static int ext4_try_to_trim_range(struct super_block *sb,
+               struct ext4_buddy *e4b, ext4_grpblk_t start,
+               ext4_grpblk_t max, ext4_grpblk_t minblocks)
+@@ -6963,8 +6969,8 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+               free_count += next - start;
+               start = next + 1;
+-              if (fatal_signal_pending(current))
+-                      return -ERESTARTSYS;
++              if (ext4_trim_interrupted())
++                      return count;
+               if (need_resched()) {
+                       ext4_unlock_group(sb, e4b->bd_group);
+@@ -7086,6 +7092,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+       end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+       for (group = first_group; group <= last_group; group++) {
++              if (ext4_trim_interrupted())
++                      break;
+               grp = ext4_get_group_info(sb, group);
+               if (!grp)
+                       continue;
+-- 
+2.40.1
+
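For context, the loop patched above runs under the FITRIM ioctl, which is what fstrim(8) issues. Below is a minimal userspace sketch (not part of the queue; the /mnt mount point is hypothetical) of the call that reaches ext4_trim_fs(): with this change, a suspend attempt or fatal signal ends the group walk early and range.len still reports how much was discarded up to that point.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* FITRIM, struct fstrim_range */

int main(void)
{
	/* Ask to trim the whole filesystem; the kernel clamps the range. */
	struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
	int fd = open("/mnt", O_RDONLY);	/* hypothetical mount point */

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* On return, range.len holds the number of bytes actually discarded,
	 * even if the trim was interrupted partway through. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}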
diff --git a/queue-6.5/ext4-move-setting-of-trimmed-bit-into-ext4_try_to_tr.patch b/queue-6.5/ext4-move-setting-of-trimmed-bit-into-ext4_try_to_tr.patch
new file mode 100644 (file)
index 0000000..1676d95
--- /dev/null
@@ -0,0 +1,170 @@
+From 282b757eda706e771f54035d8ef37a790fd90b61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 17:04:54 +0200
+Subject: ext4: move setting of trimmed bit into ext4_try_to_trim_range()
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 45e4ab320c9b5fa67b1fc3b6a9b381cfcc0c8488 ]
+
+Currently we set the group's trimmed bit in ext4_trim_all_free() based
+on the return value of ext4_try_to_trim_range(). However, when we want
+to abort trimming because of a suspend attempt, we want to return success
+from ext4_try_to_trim_range() but not set the trimmed bit. Instead of
+implementing awkward propagation of this information, just move the
+setting of the trimmed bit into ext4_try_to_trim_range(), done when the
+whole group has been trimmed.
+
+Cc: stable@kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230913150504.9054-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 46 +++++++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 21 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7d81df6667b9a..63dde4f5f984f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6920,6 +6920,16 @@ __acquires(bitlock)
+       return ret;
+ }
++static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
++                                         ext4_group_t grp)
++{
++      if (grp < ext4_get_groups_count(sb))
++              return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
++      return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++              ext4_group_first_block_no(sb, grp) - 1) >>
++                                      EXT4_CLUSTER_BITS(sb);
++}
++
+ static int ext4_try_to_trim_range(struct super_block *sb,
+               struct ext4_buddy *e4b, ext4_grpblk_t start,
+               ext4_grpblk_t max, ext4_grpblk_t minblocks)
+@@ -6927,9 +6937,12 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
+ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ {
+       ext4_grpblk_t next, count, free_count;
++      bool set_trimmed = false;
+       void *bitmap;
+       bitmap = e4b->bd_bitmap;
++      if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
++              set_trimmed = true;
+       start = max(e4b->bd_info->bb_first_free, start);
+       count = 0;
+       free_count = 0;
+@@ -6944,16 +6957,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+                       int ret = ext4_trim_extent(sb, start, next - start, e4b);
+                       if (ret && ret != -EOPNOTSUPP)
+-                              break;
++                              return count;
+                       count += next - start;
+               }
+               free_count += next - start;
+               start = next + 1;
+-              if (fatal_signal_pending(current)) {
+-                      count = -ERESTARTSYS;
+-                      break;
+-              }
++              if (fatal_signal_pending(current))
++                      return -ERESTARTSYS;
+               if (need_resched()) {
+                       ext4_unlock_group(sb, e4b->bd_group);
+@@ -6965,6 +6976,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+                       break;
+       }
++      if (set_trimmed)
++              EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
++
+       return count;
+ }
+@@ -6975,7 +6989,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+  * @start:            first group block to examine
+  * @max:              last group block to examine
+  * @minblocks:                minimum extent block count
+- * @set_trimmed:      set the trimmed flag if at least one block is trimmed
+  *
+  * ext4_trim_all_free walks through group's block bitmap searching for free
+  * extents. When the free extent is found, mark it as used in group buddy
+@@ -6985,7 +6998,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ static ext4_grpblk_t
+ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+                  ext4_grpblk_t start, ext4_grpblk_t max,
+-                 ext4_grpblk_t minblocks, bool set_trimmed)
++                 ext4_grpblk_t minblocks)
+ {
+       struct ext4_buddy e4b;
+       int ret;
+@@ -7002,13 +7015,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+       ext4_lock_group(sb, group);
+       if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
+-          minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
++          minblocks < EXT4_SB(sb)->s_last_trim_minblks)
+               ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
+-              if (ret >= 0 && set_trimmed)
+-                      EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+-      } else {
++      else
+               ret = 0;
+-      }
+       ext4_unlock_group(sb, group);
+       ext4_mb_unload_buddy(&e4b);
+@@ -7041,7 +7051,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+       ext4_fsblk_t first_data_blk =
+                       le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+       ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
+-      bool whole_group, eof = false;
+       int ret = 0;
+       start = range->start >> sb->s_blocksize_bits;
+@@ -7060,10 +7069,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+               if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
+                       goto out;
+       }
+-      if (end >= max_blks - 1) {
++      if (end >= max_blks - 1)
+               end = max_blks - 1;
+-              eof = true;
+-      }
+       if (end <= first_data_blk)
+               goto out;
+       if (start < first_data_blk)
+@@ -7077,7 +7084,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+       /* end now represents the last cluster to discard in this group */
+       end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-      whole_group = true;
+       for (group = first_group; group <= last_group; group++) {
+               grp = ext4_get_group_info(sb, group);
+@@ -7096,13 +7102,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+                * change it for the last group, note that last_cluster is
+                * already computed earlier by ext4_get_group_no_and_offset()
+                */
+-              if (group == last_group) {
++              if (group == last_group)
+                       end = last_cluster;
+-                      whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-              }
+               if (grp->bb_free >= minlen) {
+                       cnt = ext4_trim_all_free(sb, group, first_cluster,
+-                                               end, minlen, whole_group);
++                                               end, minlen);
+                       if (cnt < 0) {
+                               ret = cnt;
+                               break;
+-- 
+2.40.1
+
diff --git a/queue-6.5/ext4-replace-the-traditional-ternary-conditional-ope.patch b/queue-6.5/ext4-replace-the-traditional-ternary-conditional-ope.patch
new file mode 100644 (file)
index 0000000..acab137
--- /dev/null
@@ -0,0 +1,49 @@
+From 89b9b2e853c2eae6f322393b852d532d005fac10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Aug 2023 22:32:00 +0800
+Subject: ext4: replace the traditional ternary conditional operator with
+ max()/min()
+
+From: Kemeng Shi <shikemeng@huaweicloud.com>
+
+[ Upstream commit de8bf0e5ee7482585450357c6d4eddec8efc5cb7 ]
+
+Replace the traditional ternary conditional operator with max()/min().
+
+Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Link: https://lore.kernel.org/r/20230801143204.2284343-7-shikemeng@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 45e4ab320c9b ("ext4: move setting of trimmed bit into ext4_try_to_trim_range()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index bd7557d8dec41..7d81df6667b9a 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6930,8 +6930,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+       void *bitmap;
+       bitmap = e4b->bd_bitmap;
+-      start = (e4b->bd_info->bb_first_free > start) ?
+-              e4b->bd_info->bb_first_free : start;
++      start = max(e4b->bd_info->bb_first_free, start);
+       count = 0;
+       free_count = 0;
+@@ -7148,8 +7147,7 @@ ext4_mballoc_query_range(
+       ext4_lock_group(sb, group);
+-      start = (e4b.bd_info->bb_first_free > start) ?
+-              e4b.bd_info->bb_first_free : start;
++      start = max(e4b.bd_info->bb_first_free, start);
+       if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
+               end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-- 
+2.40.1
+
diff --git a/queue-6.5/gfs2-fix-another-freeze-thaw-hang.patch b/queue-6.5/gfs2-fix-another-freeze-thaw-hang.patch
new file mode 100644 (file)
index 0000000..0089b90
--- /dev/null
@@ -0,0 +1,64 @@
+From 14be3766c9ec51e659d23a762306b09c5223ea89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Sep 2023 20:00:28 +0200
+Subject: gfs2: Fix another freeze/thaw hang
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+[ Upstream commit 52954b750958dcab9e44935f0c32643279091c85 ]
+
+On a thawed filesystem, the freeze glock is held in shared mode.  In
+order to initiate a cluster-wide freeze, the node initiating the freeze
+drops the freeze glock and grabs it in exclusive mode.  The other nodes
+recognize this as contention on the freeze glock; function
+freeze_go_callback is invoked.  This indicates to them that they must
+freeze the filesystem locally, drop the freeze glock, and then
+re-acquire it in shared mode before being able to unfreeze the
+filesystem locally.
+
+While a node is trying to re-acquire the freeze glock in shared mode,
+additional contention can occur.  In that case, the node must behave in
+the same way as above.
+
+Unfortunately, freeze_go_callback() contains a check that causes it to
+bail out when the freeze glock isn't held in shared mode.  Fix that to
+allow the glock to be unlocked or held in shared mode.
+
+In addition, update a reference to trylock_super() which has been
+renamed to super_trylock_shared() in the meantime.
+
+Fixes: b77b4a4815a9 ("gfs2: Rework freeze / thaw logic")
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/glops.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 54319328b16b5..0a3b069386ec9 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -567,15 +567,16 @@ static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
+       struct super_block *sb = sdp->sd_vfs;
+       if (!remote ||
+-          gl->gl_state != LM_ST_SHARED ||
++          (gl->gl_state != LM_ST_SHARED &&
++           gl->gl_state != LM_ST_UNLOCKED) ||
+           gl->gl_demote_state != LM_ST_UNLOCKED)
+               return;
+       /*
+        * Try to get an active super block reference to prevent racing with
+-       * unmount (see trylock_super()).  But note that unmount isn't the only
+-       * place where a write lock on s_umount is taken, and we can fail here
+-       * because of things like remount as well.
++       * unmount (see super_trylock_shared()).  But note that unmount isn't
++       * the only place where a write lock on s_umount is taken, and we can
++       * fail here because of things like remount as well.
+        */
+       if (down_read_trylock(&sb->s_umount)) {
+               atomic_inc(&sb->s_active);
+-- 
+2.40.1
+
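For context, the cluster-wide freeze described above is normally driven from userspace through the FIFREEZE and FITHAW ioctls, which is what fsfreeze(8) issues. A minimal sketch (not part of the queue; the /mnt/gfs2 mount point is hypothetical): freezing on one node is what makes the other nodes see contention on the freeze glock and run freeze_go_callback().

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* FIFREEZE, FITHAW */

int main(void)
{
	int fd = open("/mnt/gfs2", O_RDONLY);	/* hypothetical mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Freeze: the initiating node re-takes the freeze glock exclusively,
	 * which the other cluster nodes observe as contention. */
	if (ioctl(fd, FIFREEZE, 0) < 0) {
		perror("FIFREEZE");
		return 1;
	}
	/* ... the filesystem is frozen cluster-wide at this point ... */
	if (ioctl(fd, FITHAW, 0) < 0) {
		perror("FITHAW");
		return 1;
	}
	return 0;
}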
diff --git a/queue-6.5/media-v4l-use-correct-dependency-for-camera-sensor-d.patch b/queue-6.5/media-v4l-use-correct-dependency-for-camera-sensor-d.patch
new file mode 100644 (file)
index 0000000..fa46c80
--- /dev/null
@@ -0,0 +1,81 @@
+From f16bfda9a2d937ade59715395335fd02469c72a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 12:51:49 +0300
+Subject: media: v4l: Use correct dependency for camera sensor drivers
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit 86e16b87afac20779da1228d690a95c54d7e2ad0 ]
+
+The Kconfig option that enables compiling camera sensor drivers is
+VIDEO_CAMERA_SENSOR rather than MEDIA_CAMERA_SUPPORT as it was previously.
+Fix this.
+
+Also select VIDEO_OV7670 for the Marvell platform drivers only if
+MEDIA_SUBDRV_AUTOSELECT and VIDEO_CAMERA_SENSOR are enabled.
+
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Fixes: 7d3c7d2a2914 ("media: i2c: Add a camera sensor top level menu")
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/marvell/Kconfig | 4 ++--
+ drivers/media/usb/em28xx/Kconfig       | 4 ++--
+ drivers/media/usb/go7007/Kconfig       | 2 +-
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/media/platform/marvell/Kconfig b/drivers/media/platform/marvell/Kconfig
+index ec1a16734a280..d6499ffe30e8b 100644
+--- a/drivers/media/platform/marvell/Kconfig
++++ b/drivers/media/platform/marvell/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_CAFE_CCIC
+       depends on V4L_PLATFORM_DRIVERS
+       depends on PCI && I2C && VIDEO_DEV
+       depends on COMMON_CLK
+-      select VIDEO_OV7670
++      select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+       select VIDEOBUF2_VMALLOC
+       select VIDEOBUF2_DMA_CONTIG
+       select VIDEOBUF2_DMA_SG
+@@ -22,7 +22,7 @@ config VIDEO_MMP_CAMERA
+       depends on I2C && VIDEO_DEV
+       depends on ARCH_MMP || COMPILE_TEST
+       depends on COMMON_CLK
+-      select VIDEO_OV7670
++      select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+       select I2C_GPIO
+       select VIDEOBUF2_VMALLOC
+       select VIDEOBUF2_DMA_CONTIG
+diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
+index b3c472b8c5a96..cb61fd6cc6c61 100644
+--- a/drivers/media/usb/em28xx/Kconfig
++++ b/drivers/media/usb/em28xx/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_EM28XX_V4L2
+       select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+       select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
+       select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
+-      select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+-      select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
++      select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
++      select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+       help
+         This is a video4linux driver for Empia 28xx based TV cards.
+diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
+index 4ff79940ad8d4..b2a15d9fb1f33 100644
+--- a/drivers/media/usb/go7007/Kconfig
++++ b/drivers/media/usb/go7007/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_GO7007
+       select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
+       select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
+       select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
+-      select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+       select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
++      select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+       help
+         This is a video4linux driver for the WIS GO7007 MPEG
+         encoder chip.
+-- 
+2.40.1
+
diff --git a/queue-6.5/media-via-use-correct-dependency-for-camera-sensor-d.patch b/queue-6.5/media-via-use-correct-dependency-for-camera-sensor-d.patch
new file mode 100644 (file)
index 0000000..a59dd99
--- /dev/null
@@ -0,0 +1,39 @@
+From f143a802f86aef4c1e4da59a8dd21ffc8b6c9ce5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 11:10:34 +0300
+Subject: media: via: Use correct dependency for camera sensor drivers
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit 41425941dfcf47cc6df8e500af6ff16a7be6539f ]
+
+The via camera controller driver selected the ov7670 driver; however, that
+driver now has dependencies and may no longer be selected unconditionally.
+
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Fixes: 7d3c7d2a2914 ("media: i2c: Add a camera sensor top level menu")
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Acked-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/via/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/media/platform/via/Kconfig b/drivers/media/platform/via/Kconfig
+index 8926eb0803b27..6e603c0382487 100644
+--- a/drivers/media/platform/via/Kconfig
++++ b/drivers/media/platform/via/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_VIA_CAMERA
+       depends on V4L_PLATFORM_DRIVERS
+       depends on FB_VIA && VIDEO_DEV
+       select VIDEOBUF2_DMA_SG
+-      select VIDEO_OV7670
++      select VIDEO_OV7670 if VIDEO_CAMERA_SENSOR
+       help
+          Driver support for the integrated camera controller in VIA
+          Chrome9 chipsets.  Currently only tested on OLPC xo-1.5 systems
+-- 
+2.40.1
+
diff --git a/queue-6.5/netfs-only-call-folio_start_fscache-one-time-for-eac.patch b/queue-6.5/netfs-only-call-folio_start_fscache-one-time-for-eac.patch
new file mode 100644 (file)
index 0000000..7220f56
--- /dev/null
@@ -0,0 +1,95 @@
+From 54594168e4cf46ae6c56f1eed8e1a5c86e47502e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 14:17:11 +0100
+Subject: netfs: Only call folio_start_fscache() one time for each folio
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+[ Upstream commit df1c357f25d808e30b216188330e708e09e1a412 ]
+
+If a network filesystem using netfs implements a clamp_length()
+function, it can set subrequest lengths smaller than a page size.
+
+When we loop through the folios in netfs_rreq_unlock_folios() to
+set any folios to be written back, we need to make sure we only
+call folio_start_fscache() once for each folio.
+
+Otherwise, this simple testcase:
+
+  mount -o fsc,rsize=1024,wsize=1024 127.0.0.1:/export /mnt/nfs
+  dd if=/dev/zero of=/mnt/nfs/file.bin bs=4096 count=1
+  1+0 records in
+  1+0 records out
+  4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0126359 s, 324 kB/s
+  echo 3 > /proc/sys/vm/drop_caches
+  cat /mnt/nfs/file.bin > /dev/null
+
+will trigger an oops similar to the following:
+
+  page dumped because: VM_BUG_ON_FOLIO(folio_test_private_2(folio))
+  ------------[ cut here ]------------
+  kernel BUG at include/linux/netfs.h:44!
+  ...
+  CPU: 5 PID: 134 Comm: kworker/u16:5 Kdump: loaded Not tainted 6.4.0-rc5
+  ...
+  RIP: 0010:netfs_rreq_unlock_folios+0x68e/0x730 [netfs]
+  ...
+  Call Trace:
+    netfs_rreq_assess+0x497/0x660 [netfs]
+    netfs_subreq_terminated+0x32b/0x610 [netfs]
+    nfs_netfs_read_completion+0x14e/0x1a0 [nfs]
+    nfs_read_completion+0x2f9/0x330 [nfs]
+    rpc_free_task+0x72/0xa0 [sunrpc]
+    rpc_async_release+0x46/0x70 [sunrpc]
+    process_one_work+0x3bd/0x710
+    worker_thread+0x89/0x610
+    kthread+0x181/0x1c0
+    ret_from_fork+0x29/0x50
+
+Fixes: 3d3c95046742 ("netfs: Provide readahead and readpage netfs helpers")
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2210612
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20230608214137.856006-1-dwysocha@redhat.com/ # v1
+Link: https://lore.kernel.org/r/20230915185704.1082982-1-dwysocha@redhat.com/ # v2
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/buffered_read.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 3404707ddbe73..2cd3ccf4c4399 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -47,12 +47,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+       xas_for_each(&xas, folio, last_page) {
+               loff_t pg_end;
+               bool pg_failed = false;
++              bool folio_started;
+               if (xas_retry(&xas, folio))
+                       continue;
+               pg_end = folio_pos(folio) + folio_size(folio) - 1;
++              folio_started = false;
+               for (;;) {
+                       loff_t sreq_end;
+@@ -60,8 +62,10 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+                               pg_failed = true;
+                               break;
+                       }
+-                      if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
++                      if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+                               folio_start_fscache(folio);
++                              folio_started = true;
++                      }
+                       pg_failed |= subreq_failed;
+                       sreq_end = subreq->start + subreq->len - 1;
+                       if (pg_end < sreq_end)
+-- 
+2.40.1
+
diff --git a/queue-6.5/nfs-fix-error-handling-for-o_direct-write-scheduling.patch b/queue-6.5/nfs-fix-error-handling-for-o_direct-write-scheduling.patch
new file mode 100644 (file)
index 0000000..ad6457b
--- /dev/null
@@ -0,0 +1,145 @@
+From 2eb6f426a58cb209a3f528fb8fc4a6446d6eaeeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:34:37 -0400
+Subject: NFS: Fix error handling for O_DIRECT write scheduling
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 954998b60caa8f2a3bf3abe490de6f08d283687a ]
+
+If we fail to schedule a request for transmission, there are two
+possibilities:
+1) We hit a fatal error, in which case we just want to drop the remaining
+   requests on the floor.
+2) We were asked to try again, in which case we should allow the
+   outstanding RPC calls to complete, so that we can recoalesce requests
+   and try again.
+
+Fixes: d600ad1f2bdb ("NFS41: pop some layoutget errors to application")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c | 62 ++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 16 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 47d892a1d363d..ee88f0a6e7b81 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -528,10 +528,9 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
+ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ {
+       struct nfs_pageio_descriptor desc;
+-      struct nfs_page *req, *tmp;
++      struct nfs_page *req;
+       LIST_HEAD(reqs);
+       struct nfs_commit_info cinfo;
+-      LIST_HEAD(failed);
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+       nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+@@ -549,27 +548,36 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
+-      list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
++      while (!list_empty(&reqs)) {
++              req = nfs_list_entry(reqs.next);
+               /* Bump the transmission count */
+               req->wb_nio++;
+               if (!nfs_pageio_add_request(&desc, req)) {
+-                      nfs_list_move_request(req, &failed);
+                       spin_lock(&cinfo.inode->i_lock);
+-                      dreq->flags = 0;
+-                      if (desc.pg_error < 0)
++                      if (dreq->error < 0) {
++                              desc.pg_error = dreq->error;
++                      } else if (desc.pg_error != -EAGAIN) {
++                              dreq->flags = 0;
++                              if (!desc.pg_error)
++                                      desc.pg_error = -EIO;
+                               dreq->error = desc.pg_error;
+-                      else
+-                              dreq->error = -EIO;
++                      } else
++                              dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+                       spin_unlock(&cinfo.inode->i_lock);
++                      break;
+               }
+               nfs_release_request(req);
+       }
+       nfs_pageio_complete(&desc);
+-      while (!list_empty(&failed)) {
+-              req = nfs_list_entry(failed.next);
++      while (!list_empty(&reqs)) {
++              req = nfs_list_entry(reqs.next);
+               nfs_list_remove_request(req);
+               nfs_unlock_and_release_request(req);
++              if (desc.pg_error == -EAGAIN)
++                      nfs_mark_request_commit(req, NULL, &cinfo, 0);
++              else
++                      nfs_release_request(req);
+       }
+       if (put_dreq(dreq))
+@@ -794,9 +802,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ {
+       struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
++      struct nfs_commit_info cinfo;
+       ssize_t result = 0;
+       size_t requested_bytes = 0;
+       size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
++      bool defer = false;
+       trace_nfs_direct_write_schedule_iovec(dreq);
+@@ -837,17 +847,37 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+                               break;
+                       }
+-                      nfs_lock_request(req);
+-                      if (!nfs_pageio_add_request(&desc, req)) {
+-                              result = desc.pg_error;
+-                              nfs_unlock_and_release_request(req);
+-                              break;
+-                      }
+                       pgbase = 0;
+                       bytes -= req_len;
+                       requested_bytes += req_len;
+                       pos += req_len;
+                       dreq->bytes_left -= req_len;
++
++                      if (defer) {
++                              nfs_mark_request_commit(req, NULL, &cinfo, 0);
++                              continue;
++                      }
++
++                      nfs_lock_request(req);
++                      if (nfs_pageio_add_request(&desc, req))
++                              continue;
++
++                      /* Exit on hard errors */
++                      if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
++                              result = desc.pg_error;
++                              nfs_unlock_and_release_request(req);
++                              break;
++                      }
++
++                      /* If the error is soft, defer remaining requests */
++                      nfs_init_cinfo_from_dreq(&cinfo, dreq);
++                      spin_lock(&cinfo.inode->i_lock);
++                      dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++                      spin_unlock(&cinfo.inode->i_lock);
++                      nfs_unlock_request(req);
++                      nfs_mark_request_commit(req, NULL, &cinfo, 0);
++                      desc.pg_error = 0;
++                      defer = true;
+               }
+               nfs_direct_release_pages(pagevec, npages);
+               kvfree(pagevec);
+-- 
+2.40.1
+
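For context, the scheduling path fixed above serves NFS O_DIRECT writes such as the one sketched below (not part of the queue; the /mnt/nfs path is hypothetical). O_DIRECT requires an aligned buffer, and on NFS the write is submitted through the direct-write scheduling code patched here.

#define _GNU_SOURCE			/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd;

	/* O_DIRECT needs alignment; 4096 covers common block sizes. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	fd = open("/mnt/nfs/file.bin", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pwrite(fd, buf, 4096, 0) != 4096)	/* scheduled as a direct write */
		perror("pwrite");
	close(fd);
	free(buf);
	return 0;
}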
diff --git a/queue-6.5/nfs-fix-o_direct-locking-issues.patch b/queue-6.5/nfs-fix-o_direct-locking-issues.patch
new file mode 100644 (file)
index 0000000..07c36f2
--- /dev/null
@@ -0,0 +1,56 @@
+From b1af4196d833606a2129866c94cfd7619aa3a837 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:34:38 -0400
+Subject: NFS: Fix O_DIRECT locking issues
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 7c6339322ce0c6128acbe36aacc1eeb986dd7bf1 ]
+
+The dreq fields are protected by the dreq->lock.
+
+Fixes: 954998b60caa ("NFS: Fix error handling for O_DIRECT write scheduling")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index ee88f0a6e7b81..e8a1645857dd6 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -553,7 +553,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+               /* Bump the transmission count */
+               req->wb_nio++;
+               if (!nfs_pageio_add_request(&desc, req)) {
+-                      spin_lock(&cinfo.inode->i_lock);
++                      spin_lock(&dreq->lock);
+                       if (dreq->error < 0) {
+                               desc.pg_error = dreq->error;
+                       } else if (desc.pg_error != -EAGAIN) {
+@@ -563,7 +563,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+                               dreq->error = desc.pg_error;
+                       } else
+                               dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-                      spin_unlock(&cinfo.inode->i_lock);
++                      spin_unlock(&dreq->lock);
+                       break;
+               }
+               nfs_release_request(req);
+@@ -871,9 +871,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+                       /* If the error is soft, defer remaining requests */
+                       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+-                      spin_lock(&cinfo.inode->i_lock);
++                      spin_lock(&dreq->lock);
+                       dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-                      spin_unlock(&cinfo.inode->i_lock);
++                      spin_unlock(&dreq->lock);
+                       nfs_unlock_request(req);
+                       nfs_mark_request_commit(req, NULL, &cinfo, 0);
+                       desc.pg_error = 0;
+-- 
+2.40.1
+
diff --git a/queue-6.5/nfs-more-fixes-for-nfs_direct_write_reschedule_io.patch b/queue-6.5/nfs-more-fixes-for-nfs_direct_write_reschedule_io.patch
new file mode 100644 (file)
index 0000000..f596cb9
--- /dev/null
@@ -0,0 +1,57 @@
+From 6f28615070aadca753f1a2ae7a7bb56b9853de5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:34:41 -0400
+Subject: NFS: More fixes for nfs_direct_write_reschedule_io()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit b11243f720ee5f9376861099019c8542969b6318 ]
+
+Ensure that all requests are put back onto the commit list so that they
+can be rescheduled.
+
+Fixes: 4daaeba93822 ("NFS: Fix nfs_direct_write_reschedule_io()")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 3391c8b97da5e..f6c74f4246917 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -780,18 +780,23 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
+ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
+ {
+       struct nfs_direct_req *dreq = hdr->dreq;
++      struct nfs_page *req;
++      struct nfs_commit_info cinfo;
+       trace_nfs_direct_write_reschedule_io(dreq);
++      nfs_init_cinfo_from_dreq(&cinfo, dreq);
+       spin_lock(&dreq->lock);
+-      if (dreq->error == 0) {
++      if (dreq->error == 0)
+               dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-              /* fake unstable write to let common nfs resend pages */
+-              hdr->verf.committed = NFS_UNSTABLE;
+-              hdr->good_bytes = hdr->args.offset + hdr->args.count -
+-                      hdr->io_start;
+-      }
++      set_bit(NFS_IOHDR_REDO, &hdr->flags);
+       spin_unlock(&dreq->lock);
++      while (!list_empty(&hdr->pages)) {
++              req = nfs_list_entry(hdr->pages.next);
++              nfs_list_remove_request(req);
++              nfs_unlock_request(req);
++              nfs_mark_request_commit(req, NULL, &cinfo, 0);
++      }
+ }
+ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+-- 
+2.40.1
+
diff --git a/queue-6.5/nfs-more-o_direct-accounting-fixes-for-error-paths.patch b/queue-6.5/nfs-more-o_direct-accounting-fixes-for-error-paths.patch
new file mode 100644 (file)
index 0000000..2ac9442
--- /dev/null
@@ -0,0 +1,140 @@
+From 3a0bca0ba4312ad1e169cc84079e4dbcbfd12438 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:34:39 -0400
+Subject: NFS: More O_DIRECT accounting fixes for error paths
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 8982f7aff39fb526aba4441fff2525fcedd5e1a3 ]
+
+If we hit a fatal error when retransmitting, we do need to record the
+removal of the request from the count of written bytes.
+
+Fixes: 031d73ed768a ("NFS: Fix O_DIRECT accounting of number of bytes read/written")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c | 47 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 31 insertions(+), 16 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index e8a1645857dd6..a53e501234993 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -93,12 +93,10 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+               dreq->max_count = dreq_len;
+               if (dreq->count > dreq_len)
+                       dreq->count = dreq_len;
+-
+-              if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+-                      dreq->error = hdr->error;
+-              else /* Clear outstanding error if this is EOF */
+-                      dreq->error = 0;
+       }
++
++      if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
++              dreq->error = hdr->error;
+ }
+ static void
+@@ -120,6 +118,18 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+               dreq->count = dreq_len;
+ }
++static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
++                                      struct nfs_page *req)
++{
++      loff_t offs = req_offset(req);
++      size_t req_start = (size_t)(offs - dreq->io_start);
++
++      if (req_start < dreq->max_count)
++              dreq->max_count = req_start;
++      if (req_start < dreq->count)
++              dreq->count = req_start;
++}
++
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -537,10 +547,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+       nfs_direct_join_group(&reqs, dreq->inode);
+-      dreq->count = 0;
+-      dreq->max_count = 0;
+-      list_for_each_entry(req, &reqs, wb_list)
+-              dreq->max_count += req->wb_bytes;
+       nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
+       get_dreq(dreq);
+@@ -574,10 +580,14 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+               req = nfs_list_entry(reqs.next);
+               nfs_list_remove_request(req);
+               nfs_unlock_and_release_request(req);
+-              if (desc.pg_error == -EAGAIN)
++              if (desc.pg_error == -EAGAIN) {
+                       nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-              else
++              } else {
++                      spin_lock(&dreq->lock);
++                      nfs_direct_truncate_request(dreq, req);
++                      spin_unlock(&dreq->lock);
+                       nfs_release_request(req);
++              }
+       }
+       if (put_dreq(dreq))
+@@ -597,8 +607,6 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+       if (status < 0) {
+               /* Errors in commit are fatal */
+               dreq->error = status;
+-              dreq->max_count = 0;
+-              dreq->count = 0;
+               dreq->flags = NFS_ODIRECT_DONE;
+       } else {
+               status = dreq->error;
+@@ -609,7 +617,12 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+       while (!list_empty(&data->pages)) {
+               req = nfs_list_entry(data->pages.next);
+               nfs_list_remove_request(req);
+-              if (status >= 0 && !nfs_write_match_verf(verf, req)) {
++              if (status < 0) {
++                      spin_lock(&dreq->lock);
++                      nfs_direct_truncate_request(dreq, req);
++                      spin_unlock(&dreq->lock);
++                      nfs_release_request(req);
++              } else if (!nfs_write_match_verf(verf, req)) {
+                       dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+                       /*
+                        * Despite the reboot, the write was successful,
+@@ -617,7 +630,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+                        */
+                       req->wb_nio = 0;
+                       nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-              } else /* Error or match */
++              } else
+                       nfs_release_request(req);
+               nfs_unlock_and_release_request(req);
+       }
+@@ -670,6 +683,7 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+       while (!list_empty(&reqs)) {
+               req = nfs_list_entry(reqs.next);
+               nfs_list_remove_request(req);
++              nfs_direct_truncate_request(dreq, req);
+               nfs_release_request(req);
+               nfs_unlock_and_release_request(req);
+       }
+@@ -719,7 +733,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+       }
+       nfs_direct_count_bytes(dreq, hdr);
+-      if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
++      if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
++          !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+               if (!dreq->flags)
+                       dreq->flags = NFS_ODIRECT_DO_COMMIT;
+               flags = dreq->flags;
+-- 
+2.40.1
+
diff --git a/queue-6.5/nfs-pnfs-report-einval-errors-from-connect-to-the-se.patch b/queue-6.5/nfs-pnfs-report-einval-errors-from-connect-to-the-se.patch
new file mode 100644 (file)
index 0000000..d437471
--- /dev/null
@@ -0,0 +1,36 @@
+From 65c0c020ecb82bfa5f12973282dbe70c0d8ccc9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:43:58 -0400
+Subject: NFS/pNFS: Report EINVAL errors from connect() to the server
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit dd7d7ee3ba2a70d12d02defb478790cf57d5b87b ]
+
+With IPv6, connect() can occasionally return EINVAL if a route is
+unavailable. If this happens during I/O to a data server, we want to
+report it using LAYOUTERROR as an inability to connect.
+
+Fixes: dd52128afdde ("NFSv4.1/pnfs Ensure flexfiles reports all connection related errors")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 7deb3cd76abe4..a1dc338649062 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1235,6 +1235,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+               case -EPFNOSUPPORT:
+               case -EPROTONOSUPPORT:
+               case -EOPNOTSUPP:
++              case -EINVAL:
+               case -ECONNREFUSED:
+               case -ECONNRESET:
+               case -EHOSTDOWN:
+-- 
+2.40.1
+
diff --git a/queue-6.5/nfs-use-the-correct-commit-info-in-nfs_join_page_gro.patch b/queue-6.5/nfs-use-the-correct-commit-info-in-nfs_join_page_gro.patch
new file mode 100644 (file)
index 0000000..66f69bc
--- /dev/null
@@ -0,0 +1,150 @@
+From 3406232bd0f170a8581f36a8a68807859eb878ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:34:40 -0400
+Subject: NFS: Use the correct commit info in nfs_join_page_group()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit b193a78ddb5ee7dba074d3f28dc050069ba083c0 ]
+
+Ensure that nfs_clear_request_commit() updates the correct counters when
+it removes requests from the commit list.
+
+Fixes: ed5d588fe47f ("NFS: Try to join page groups before an O_DIRECT retransmission")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c          |  8 +++++---
+ fs/nfs/write.c           | 23 ++++++++++++-----------
+ include/linux/nfs_page.h |  4 +++-
+ 3 files changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index a53e501234993..3391c8b97da5e 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -498,7 +498,9 @@ static void nfs_direct_add_page_head(struct list_head *list,
+       kref_get(&head->wb_kref);
+ }
+-static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
++static void nfs_direct_join_group(struct list_head *list,
++                                struct nfs_commit_info *cinfo,
++                                struct inode *inode)
+ {
+       struct nfs_page *req, *subreq;
+@@ -520,7 +522,7 @@ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+                               nfs_release_request(subreq);
+                       }
+               } while ((subreq = subreq->wb_this_page) != req);
+-              nfs_join_page_group(req, inode);
++              nfs_join_page_group(req, cinfo, inode);
+       }
+ }
+@@ -545,7 +547,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+       nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+-      nfs_direct_join_group(&reqs, dreq->inode);
++      nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
+       nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
+       get_dreq(dreq);
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index f4cca8f00c0c2..8c1ee1a1a28f1 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -59,7 +59,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+ static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
+ static const struct nfs_rw_ops nfs_rw_write_ops;
+ static void nfs_inode_remove_request(struct nfs_page *req);
+-static void nfs_clear_request_commit(struct nfs_page *req);
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++                                   struct nfs_page *req);
+ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+                                     struct inode *inode);
+ static struct nfs_page *
+@@ -502,8 +503,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+  * the (former) group.  All subrequests are removed from any write or commit
+  * lists, unlinked from the group and destroyed.
+  */
+-void
+-nfs_join_page_group(struct nfs_page *head, struct inode *inode)
++void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
++                       struct inode *inode)
+ {
+       struct nfs_page *subreq;
+       struct nfs_page *destroy_list = NULL;
+@@ -533,7 +534,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+        * Commit list removal accounting is done after locks are dropped */
+       subreq = head;
+       do {
+-              nfs_clear_request_commit(subreq);
++              nfs_clear_request_commit(cinfo, subreq);
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+@@ -566,8 +567,10 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ {
+       struct inode *inode = folio_file_mapping(folio)->host;
+       struct nfs_page *head;
++      struct nfs_commit_info cinfo;
+       int ret;
++      nfs_init_cinfo_from_inode(&cinfo, inode);
+       /*
+        * A reference is taken only on the head request which acts as a
+        * reference to the whole page group - the group will not be destroyed
+@@ -584,7 +587,7 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+               return ERR_PTR(ret);
+       }
+-      nfs_join_page_group(head, inode);
++      nfs_join_page_group(head, &cinfo, inode);
+       return head;
+ }
+@@ -955,18 +958,16 @@ static void nfs_folio_clear_commit(struct folio *folio)
+ }
+ /* Called holding the request lock on @req */
+-static void
+-nfs_clear_request_commit(struct nfs_page *req)
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++                                   struct nfs_page *req)
+ {
+       if (test_bit(PG_CLEAN, &req->wb_flags)) {
+               struct nfs_open_context *ctx = nfs_req_openctx(req);
+               struct inode *inode = d_inode(ctx->dentry);
+-              struct nfs_commit_info cinfo;
+-              nfs_init_cinfo_from_inode(&cinfo, inode);
+               mutex_lock(&NFS_I(inode)->commit_mutex);
+-              if (!pnfs_clear_request_commit(req, &cinfo)) {
+-                      nfs_request_remove_commit_list(req, &cinfo);
++              if (!pnfs_clear_request_commit(req, cinfo)) {
++                      nfs_request_remove_commit_list(req, cinfo);
+               }
+               mutex_unlock(&NFS_I(inode)->commit_mutex);
+               nfs_folio_clear_commit(nfs_page_to_folio(req));
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index aa9f4c6ebe261..1c315f854ea80 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -157,7 +157,9 @@ extern     void nfs_unlock_request(struct nfs_page *req);
+ extern        void nfs_unlock_and_release_request(struct nfs_page *);
+ extern        struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+ extern        int nfs_page_group_lock_subrequests(struct nfs_page *head);
+-extern        void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
++extern void nfs_join_page_group(struct nfs_page *head,
++                              struct nfs_commit_info *cinfo,
++                              struct inode *inode);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+-- 
+2.40.1
+
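The design choice in the patch above is to stop having nfs_clear_request_commit() rebuild a commit-info context from the inode and instead plumb the caller's nfs_commit_info down, so the O_DIRECT path accounts against the dreq's context rather than the inode's. The following minimal userspace sketch shows the pattern in miniature; the structure and function names are illustrative only and do not correspond to the kernel API.

#include <stdio.h>

struct commit_info { const char *owner; int ncommit; };

/* New style: the caller hands in the accounting context it owns,
 * instead of the helper re-deriving one (and always picking the inode's). */
static void clear_request_commit(struct commit_info *cinfo)
{
	if (cinfo->ncommit > 0)
		cinfo->ncommit--;	/* decrement the caller's counter */
}

int main(void)
{
	struct commit_info inode_cinfo = { "inode", 1 };
	struct commit_info dreq_cinfo  = { "dreq",  1 };

	clear_request_commit(&dreq_cinfo);	/* O_DIRECT retransmission path */
	printf("%s=%d %s=%d\n", inode_cinfo.owner, inode_cinfo.ncommit,
	       dreq_cinfo.owner, dreq_cinfo.ncommit);
	return 0;
}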
diff --git a/queue-6.5/nfsv4.1-fix-pnfs-mds-ds-session-trunking.patch b/queue-6.5/nfsv4.1-fix-pnfs-mds-ds-session-trunking.patch
new file mode 100644 (file)
index 0000000..e025e23
--- /dev/null
@@ -0,0 +1,134 @@
+From 51033819a93169320cabef5e55d41947a1a83042 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Aug 2023 15:29:34 -0400
+Subject: NFSv4.1: fix pnfs MDS=DS session trunking
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+[ Upstream commit 806a3bc421a115fbb287c1efce63a48c54ee804b ]
+
+Currently, when GETDEVICEINFO returns multiple locations that are
+different IP addresses but share the MDS's server identity,
+nfs4_set_ds_client() finds the existing nfs_client structure, which
+carries the MDS's max_connect value. If that value is 1, the first IP
+on the DS's list gets dropped due to the MDS trunking rules, while the
+remaining IPs are added because they fall under the pNFS trunking
+rules.
+
+For the list of IPs, the first one goes through nfs4_set_ds_client(),
+which eventually calls nfs4_add_trunk() and then
+rpc_clnt_test_and_add_xprt(), which contains the MDS trunking check.
+The other IPs (after the first) go through rpc_clnt_add_xprt(), which
+skips that check.
+
+nfs4_add_trunk() is called when MDS trunking is happening, and it has
+to enforce the max_connect mount option of the first mount. However,
+that limit should not be applied to the pNFS flow.
+
+Instead, treat MDS=DS as DS trunking and make sure that the MDS's
+max_connect limit does not apply to the first IP returned in the
+GETDEVICEINFO list. Do this by marking the newly created client with a
+new flag, NFS_CS_PNFS, which is then used to pass the max_connect
+value to use into rpc_clnt_test_and_add_xprt() instead of the existing
+rpc client's max_connect value set by the MDS connection.
+
+For example, if the mount was done without max_connect set, the MDS's
+rpc client has cl_max_connect=1. When calling into
+rpc_clnt_test_and_add_xprt(), instead of using the rpc client's value,
+the caller passes in the max_connect value that was previously set in
+the pNFS path (as part of handling the GETDEVICEINFO list of IPs) in
+nfs4_set_ds_client().
+
+However, when the NFS_CS_PNFS flag is not set and we know we are doing
+MDS trunking while comparing a new IP of the same server, the
+max_connect value is set to the existing MDS's value and passed into
+rpc_clnt_test_and_add_xprt().
+
+Fixes: dc48e0abee24 ("SUNRPC enforce creation of no more than max_connect xprts")
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4client.c       |  6 +++++-
+ include/linux/nfs_fs_sb.h |  1 +
+ net/sunrpc/clnt.c         | 11 +++++++----
+ 3 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 27fb25567ce75..11e3a285594c2 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -417,6 +417,8 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+               .net = old->cl_net,
+               .servername = old->cl_hostname,
+       };
++      int max_connect = test_bit(NFS_CS_PNFS, &clp->cl_flags) ?
++              clp->cl_max_connect : old->cl_max_connect;
+       if (clp->cl_proto != old->cl_proto)
+               return;
+@@ -430,7 +432,7 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+       xprt_args.addrlen = clp_salen;
+       rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
+-                        rpc_clnt_test_and_add_xprt, NULL);
++                        rpc_clnt_test_and_add_xprt, &max_connect);
+ }
+ /**
+@@ -1010,6 +1012,8 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+               __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+       __set_bit(NFS_CS_DS, &cl_init.init_flags);
++      __set_bit(NFS_CS_PNFS, &cl_init.init_flags);
++      cl_init.max_connect = NFS_MAX_TRANSPORTS;
+       /*
+        * Set an authflavor equual to the MDS value. Use the MDS nfs_client
+        * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 20eeba8b009df..cd628c4b011e5 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -48,6 +48,7 @@ struct nfs_client {
+ #define NFS_CS_NOPING         6               /* - don't ping on connect */
+ #define NFS_CS_DS             7               /* - Server is a DS */
+ #define NFS_CS_REUSEPORT      8               /* - reuse src port on reconnect */
++#define NFS_CS_PNFS           9               /* - Server used for pnfs */
+       struct sockaddr_storage cl_addr;        /* server identifier */
+       size_t                  cl_addrlen;
+       char *                  cl_hostname;    /* hostname of server */
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index c8ee7be4c631c..62a09e51e4316 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2905,19 +2905,22 @@ static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
+  * @clnt: pointer to struct rpc_clnt
+  * @xps: pointer to struct rpc_xprt_switch,
+  * @xprt: pointer struct rpc_xprt
+- * @dummy: unused
++ * @in_max_connect: pointer to the max_connect value for the passed in xprt transport
+  */
+ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+               struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
+-              void *dummy)
++              void *in_max_connect)
+ {
+       struct rpc_cb_add_xprt_calldata *data;
+       struct rpc_task *task;
++      int max_connect = clnt->cl_max_connect;
+-      if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
++      if (in_max_connect)
++              max_connect = *(int *)in_max_connect;
++      if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
+               rcu_read_lock();
+               pr_warn("SUNRPC: reached max allowed number (%d) did not add "
+-                      "transport to server: %s\n", clnt->cl_max_connect,
++                      "transport to server: %s\n", max_connect,
+                       rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
+               rcu_read_unlock();
+               return -EINVAL;
+-- 
+2.40.1
+
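The core of the trunking decision described above can be modelled as a small standalone C program: when the client was created for pNFS (MDS=DS), its own max_connect limit is used instead of the MDS rpc_clnt's cl_max_connect, and that value is what the add-transport test compares against. This is a simplified sketch, not the kernel code; the structure names, the effective_max_connect()/may_add_xprt() helpers, and the value 16 (standing in for NFS_MAX_TRANSPORTS) are assumptions made for the example.

#include <stdbool.h>
#include <stdio.h>

struct mds_rpc_clnt { int cl_max_connect; };
struct ds_client    { bool cs_pnfs; int cl_max_connect; };

/* Pick the limit to hand to the add-transport test. */
static int effective_max_connect(const struct ds_client *clp,
				 const struct mds_rpc_clnt *mds)
{
	return clp->cs_pnfs ? clp->cl_max_connect : mds->cl_max_connect;
}

/* Mirrors the check in rpc_clnt_test_and_add_xprt(): refuse the new
 * transport once the per-server count would exceed the limit. */
static bool may_add_xprt(int nunique_destaddr_xprts, int max_connect)
{
	return nunique_destaddr_xprts + 1 <= max_connect;
}

int main(void)
{
	struct mds_rpc_clnt mds = { .cl_max_connect = 1 };	/* default mount */
	struct ds_client ds = { .cs_pnfs = true, .cl_max_connect = 16 };

	int limit = effective_max_connect(&ds, &mds);
	printf("limit=%d, first DS IP accepted=%d\n",
	       limit, may_add_xprt(1, limit));
	return 0;
}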
diff --git a/queue-6.5/nfsv4.1-use-exchgid4_flag_use_pnfs_ds-for-ds-server.patch b/queue-6.5/nfsv4.1-use-exchgid4_flag_use_pnfs_ds-for-ds-server.patch
new file mode 100644 (file)
index 0000000..1603266
--- /dev/null
@@ -0,0 +1,68 @@
+From 68734a9ff2ba9dba99a25c26225d04e38256822a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Jul 2023 13:02:38 -0400
+Subject: NFSv4.1: use EXCHGID4_FLAG_USE_PNFS_DS for DS server
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+[ Upstream commit 51d674a5e4889f1c8e223ac131cf218e1631e423 ]
+
+After receiving the location(s) of the DS server(s) in the
+GETDEVICEINFO reply, create the clientid request to such a server and
+indicate that the client is connecting to a DS.
+
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Stable-dep-of: 806a3bc421a1 ("NFSv4.1: fix pnfs MDS=DS session trunking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4client.c | 3 +++
+ fs/nfs/nfs4proc.c   | 4 ++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index d9114a754db73..27fb25567ce75 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -232,6 +232,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+       __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+       __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
++      if (test_bit(NFS_CS_DS, &cl_init->init_flags))
++              __set_bit(NFS_CS_DS, &clp->cl_flags);
+       /*
+        * Set up the connection to the server before we add add to the
+        * global list.
+@@ -1007,6 +1009,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+       if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+               __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
++      __set_bit(NFS_CS_DS, &cl_init.init_flags);
+       /*
+        * Set an authflavor equual to the MDS value. Use the MDS nfs_client
+        * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3c24c3c99e8ac..3bc6bfdf7b814 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8787,6 +8787,8 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+       calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
+ #endif
++      if (test_bit(NFS_CS_DS, &clp->cl_flags))
++              calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
+       msg.rpc_argp = &calldata->args;
+       msg.rpc_resp = &calldata->res;
+       task_setup_data.callback_data = calldata;
+@@ -8864,6 +8866,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+       /* Save the EXCHANGE_ID verifier session trunk tests */
+       memcpy(clp->cl_confirm.data, argp->verifier.data,
+              sizeof(clp->cl_confirm.data));
++      if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
++              set_bit(NFS_CS_DS, &clp->cl_flags);
+ out:
+       trace_nfs4_exchange_id(clp, status);
+       rpc_put_task(task);
+-- 
+2.40.1
+
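A compact way to picture the EXCHANGE_ID flag handling above: a client created for a data server sets EXCHGID4_FLAG_USE_PNFS_DS in its request flags and remembers whether the server echoed the flag back. The sketch below is a userspace model, not the kernel implementation; the helper name and reply handling are invented, and the flag value is the one defined in RFC 8881 (double-check against include/linux/nfs4.h).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000	/* RFC 8881 value; assumption, verify in nfs4.h */

/* Build the flags word for EXCHANGE_ID depending on whether this
 * nfs_client was created to talk to a data server. */
static uint32_t build_exchange_id_flags(bool client_is_ds)
{
	uint32_t flags = 0;

	if (client_is_ds)
		flags |= EXCHGID4_FLAG_USE_PNFS_DS;
	return flags;
}

int main(void)
{
	uint32_t args_flags = build_exchange_id_flags(true);
	uint32_t reply_flags = args_flags;	/* assume the server echoes the flag */

	printf("treat this client as a DS: %s\n",
	       (reply_flags & EXCHGID4_FLAG_USE_PNFS_DS) ? "yes" : "no");
	return 0;
}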
diff --git a/queue-6.5/series b/queue-6.5/series
new file mode 100644 (file)
index 0000000..c62094a
--- /dev/null
@@ -0,0 +1,18 @@
+nfs-fix-error-handling-for-o_direct-write-scheduling.patch
+nfs-fix-o_direct-locking-issues.patch
+nfs-more-o_direct-accounting-fixes-for-error-paths.patch
+nfs-use-the-correct-commit-info-in-nfs_join_page_gro.patch
+nfs-more-fixes-for-nfs_direct_write_reschedule_io.patch
+nfs-pnfs-report-einval-errors-from-connect-to-the-se.patch
+sunrpc-mark-the-cred-for-revalidation-if-the-server-.patch
+nfsv4.1-use-exchgid4_flag_use_pnfs_ds-for-ds-server.patch
+nfsv4.1-fix-pnfs-mds-ds-session-trunking.patch
+media-v4l-use-correct-dependency-for-camera-sensor-d.patch
+media-via-use-correct-dependency-for-camera-sensor-d.patch
+gfs2-fix-another-freeze-thaw-hang.patch
+netfs-only-call-folio_start_fscache-one-time-for-eac.patch
+btrfs-improve-error-message-after-failure-to-add-del.patch
+btrfs-remove-bug-after-failure-to-insert-delayed-dir.patch
+ext4-replace-the-traditional-ternary-conditional-ope.patch
+ext4-move-setting-of-trimmed-bit-into-ext4_try_to_tr.patch
+ext4-do-not-let-fstrim-block-system-suspend.patch
diff --git a/queue-6.5/sunrpc-mark-the-cred-for-revalidation-if-the-server-.patch b/queue-6.5/sunrpc-mark-the-cred-for-revalidation-if-the-server-.patch
new file mode 100644 (file)
index 0000000..3fa3b1f
--- /dev/null
@@ -0,0 +1,35 @@
+From b84bb760f7b059990eae285fc28202ed152072b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 12:50:09 -0400
+Subject: SUNRPC: Mark the cred for revalidation if the server rejects it
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 611fa42dfa9d2f3918ac5f4dd5705dfad81b323d ]
+
+If the server rejects the credential as stale or bad, then we should
+mark it for revalidation before retransmitting.
+
+Fixes: 7f5667a5f8c4 ("SUNRPC: Clean up rpc_verify_header()")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 315bd59dea056..c8ee7be4c631c 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2748,6 +2748,7 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
+       case rpc_autherr_rejectedverf:
+       case rpcsec_gsserr_credproblem:
+       case rpcsec_gsserr_ctxproblem:
++              rpcauth_invalcred(task);
+               if (!task->tk_cred_retry)
+                       break;
+               task->tk_cred_retry--;
+-- 
+2.40.1
+
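The behavioural effect of the one-line fix above is that an authentication rejection invalidates the cached credential before the retry, so the next transmission revalidates it instead of replaying the stale one. The following is a minimal userspace sketch of that retry flow; all structure and helper names are illustrative and only loosely stand in for rpcauth_invalcred() and the task's credential-retry counter.

#include <stdbool.h>
#include <stdio.h>

struct cred_state { bool valid; int generation; };

static void invalidate_cred(struct cred_state *c)	/* stands in for rpcauth_invalcred() */
{
	c->valid = false;
}

static void refresh_cred(struct cred_state *c)		/* happens on the next transmit */
{
	if (!c->valid) {
		c->generation++;
		c->valid = true;
	}
}

int main(void)
{
	struct cred_state cred = { .valid = true, .generation = 1 };
	int cred_retry = 2;

	/* server replied with a credential/verifier rejection */
	invalidate_cred(&cred);
	if (cred_retry-- > 0) {
		refresh_cred(&cred);
		printf("retrying with credential generation %d\n", cred.generation);
	}
	return 0;
}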