btrfs: scrub: add cancel/pause/removed bg checks for raid56 parity stripes
author Qu Wenruo <wqu@suse.com>
Sun, 19 Oct 2025 00:45:26 +0000 (11:15 +1030)
committer David Sterba <dsterba@suse.com>
Mon, 24 Nov 2025 21:21:38 +0000 (22:21 +0100)
For raid56, data and parity stripes are handled differently.

Data stripes are handled just like regular RAID1/RAID10 stripes, going
through the regular scrub_simple_mirror() path.

But for parity stripes we have to read out all involved data stripes and
do any needed verification and repair, then scrub the parity stripe.
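
To make the cost difference concrete, here is a minimal, self-contained
userspace sketch (not the btrfs code; the stripe size, stripe count and
function names are invented, and it only covers the XOR-based P parity
of RAID5, not the RAID6 Q parity): verifying a parity stripe requires
reading every data stripe in the group and XORing them together, while
a data stripe can be checked on its own.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRIPE_LEN	8	/* toy stripe size; the real one is 64K */
#define NR_DATA		3	/* number of data stripes in the group */

/*
 * RAID5-style parity check: XOR all data stripes together and compare
 * the result with the stored parity stripe.  This is why scrubbing a
 * parity stripe needs every involved data stripe read (and verified)
 * first, while a data stripe can be scrubbed on its own.
 */
static bool parity_stripe_ok(uint8_t data[NR_DATA][STRIPE_LEN],
			     const uint8_t parity[STRIPE_LEN])
{
	uint8_t calc[STRIPE_LEN] = { 0 };

	for (int i = 0; i < NR_DATA; i++)
		for (int b = 0; b < STRIPE_LEN; b++)
			calc[b] ^= data[i][b];

	return memcmp(calc, parity, STRIPE_LEN) == 0;
}

int main(void)
{
	uint8_t data[NR_DATA][STRIPE_LEN];
	uint8_t parity[STRIPE_LEN] = { 0 };

	/* Fill the data stripes with an arbitrary pattern ... */
	for (int i = 0; i < NR_DATA; i++)
		memset(data[i], 'A' + i, STRIPE_LEN);

	/* ... compute the matching parity ... */
	for (int i = 0; i < NR_DATA; i++)
		for (int b = 0; b < STRIPE_LEN; b++)
			parity[b] ^= data[i][b];

	/* ... and verify it, as a parity-stripe scrub conceptually does. */
	printf("parity %s\n", parity_stripe_ok(data, parity) ? "ok" : "bad");
	return 0;
}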

This process takes much longer than scrubbing a regular stripe, but
unlike scrub_simple_mirror(), we do not check whether we should cancel
or pause, nor whether the block group has already been removed.

Align the behavior of scrub_raid56_parity_stripe() with that of
scrub_simple_mirror() by adding (see the sketch after this list):

- Cancel check
- Pause check
- Removed block group check
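
The shape of those checks is the same as in scrub_simple_mirror(), as
the hunks below show.  As a rough userspace analogue only (the flag
names, the mutex standing in for the block group spinlock, and
scrub_unit_prechecks() itself are all made up for illustration):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Made-up stand-ins for the kernel-side state; not the btrfs names. */
static atomic_bool cancel_req;
static atomic_bool pause_req;

struct block_group {
	pthread_mutex_t lock;
	bool removed;
};

/*
 * Run before each expensive unit of scrub work.
 * Returns -ECANCELED on cancel, 0 when the block group is gone (nothing
 * left to do), and 1 when the caller should go ahead and scrub.
 */
static int scrub_unit_prechecks(struct block_group *bg)
{
	if (atomic_load(&cancel_req))
		return -ECANCELED;

	/* Stand-in for scrub_blocked_if_needed(): wait out a pause request. */
	while (atomic_load(&pause_req))
		usleep(1000);

	pthread_mutex_lock(&bg->lock);
	if (bg->removed) {
		pthread_mutex_unlock(&bg->lock);
		return 0;
	}
	pthread_mutex_unlock(&bg->lock);

	return 1;
}

int main(void)
{
	struct block_group bg = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.removed = false,
	};

	printf("prechecks: %d\n", scrub_unit_prechecks(&bg));
	return 0;
}

As in the kernel code, cancellation is an error for the caller to
propagate, while a removed block group simply means there is nothing
left to scrub.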

Since those checks are the same as the ones in scrub_simple_mirror(),
also update the comments of scrub_simple_mirror() by:

- Remove too obvious comments
  We do not need extra comments describing what we are checking; it is
  obvious from the code.

- Remove a stale comment about pausing
  Scrub now queues all involved stripes and submits them in one go, so
  there is no longer a separate submission step while pausing.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/scrub.c

index e760e76df3f0f527f7e9d099d5ae8c2905d00f7f..00e42a7f52afca0d8fe9044d83030c89d2432336 100644
@@ -2091,6 +2091,20 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
 
        ASSERT(sctx->raid56_data_stripes);
 
+       if (atomic_read(&fs_info->scrub_cancel_req) ||
+           atomic_read(&sctx->cancel_req))
+               return -ECANCELED;
+
+       if (atomic_read(&fs_info->scrub_pause_req))
+               scrub_blocked_if_needed(fs_info);
+
+       spin_lock(&bg->lock);
+       if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
+               spin_unlock(&bg->lock);
+               return 0;
+       }
+       spin_unlock(&bg->lock);
+
        /*
         * For data stripe search, we cannot reuse the same extent/csum paths,
         * as the data stripe bytenr may be smaller than previous extent.  Thus
@@ -2263,18 +2277,15 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
                u64 found_logical = U64_MAX;
                u64 cur_physical = physical + cur_logical - logical_start;
 
-               /* Canceled? */
                if (atomic_read(&fs_info->scrub_cancel_req) ||
                    atomic_read(&sctx->cancel_req)) {
                        ret = -ECANCELED;
                        break;
                }
-               /* Paused? */
-               if (atomic_read(&fs_info->scrub_pause_req)) {
-                       /* Push queued extents */
+
+               if (atomic_read(&fs_info->scrub_pause_req))
                        scrub_blocked_if_needed(fs_info);
-               }
-               /* Block group removed? */
+
                spin_lock(&bg->lock);
                if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
                        spin_unlock(&bg->lock);