#define BTRFS_PATH_AUTO_FREE(path_name) \
struct btrfs_path *path_name __free(btrfs_free_path) = NULL
+/*
+ * This defines an on-stack path that is automatically released when it goes
+ * out of scope.
+ *
+ * It is compatible with existing manual btrfs_release_path() calls, since
+ * releasing an already-released path is a no-op.
+ */
+#define BTRFS_PATH_AUTO_RELEASE(path_name) \
+ struct btrfs_path path_name __free(btrfs_release_path) = { 0 }
+
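+/*
+ * A minimal usage sketch (the surrounding function, root and key below are
+ * hypothetical and shown only for illustration):
+ *
+ *   BTRFS_PATH_AUTO_RELEASE(path);
+ *   int ret;
+ *
+ *   ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
+ *   if (ret < 0)
+ *           return ret;
+ *   ... use path.nodes[0] and path.slots[0], return as needed ...
+ *
+ * The path is released automatically on every exit from the scope, including
+ * the early error return above.
+ */
+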
/*
* The state of btrfs root
*/
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
DEFINE_FREE(btrfs_free_path, struct btrfs_path *, btrfs_free_path(_T))
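+/* Cleanup callback for __free(btrfs_release_path), used by BTRFS_PATH_AUTO_RELEASE. */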
+DEFINE_FREE(btrfs_release_path, struct btrfs_path, btrfs_release_path(&_T))
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
{
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *fi;
- struct btrfs_path path = { 0 };
+ BTRFS_PATH_AUTO_RELEASE(path);
struct extent_map *em;
struct btrfs_key key;
u64 ino = btrfs_ino(inode);
if (ret > 0)
goto not_found;
}
- btrfs_release_path(&path);
return em;
not_found:
- btrfs_release_path(&path);
btrfs_free_extent_map(em);
return NULL;
err:
- btrfs_release_path(&path);
btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
int mirror_num)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_path path = { 0 };
+ BTRFS_PATH_AUTO_RELEASE(path);
struct btrfs_key found_key = { 0 };
struct extent_buffer *eb;
struct btrfs_extent_item *ei;
if (ret < 0) {
btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
logical, ret);
- btrfs_release_path(&path);
return;
}
eb = path.nodes[0];
(ref_level ? "node" : "leaf"),
ref_level, ref_root);
}
- btrfs_release_path(&path);
} else {
struct btrfs_backref_walk_ctx ctx = { 0 };
struct data_reloc_warn reloc_warn = { 0 };
+ /*
+ * Do not hold the path, as the later iterate_extent_inodes() call
+ * can be time consuming.
+ */
btrfs_release_path(&path);
ctx.bytenr = found_key.objectid;
u64 full_stripe_start)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_path extent_path = { 0 };
- struct btrfs_path csum_path = { 0 };
+ BTRFS_PATH_AUTO_RELEASE(extent_path);
+ BTRFS_PATH_AUTO_RELEASE(csum_path);
struct scrub_stripe *stripe;
bool all_empty = true;
const int data_stripes = nr_data_stripes(map);
full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe);
if (ret < 0)
- goto out;
+ return ret;
/*
* No extent in this data stripe, need to manually mark them
* initialized to make later read submission happy.
break;
}
}
- if (all_empty) {
- ret = 0;
- goto out;
- }
+ if (all_empty)
+ return 0;
for (int i = 0; i < data_stripes; i++) {
stripe = &sctx->raid56_data_stripes[i];
"scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
full_stripe_start, i, stripe->nr_sectors,
&error);
- ret = -EIO;
- goto out;
+ return -EIO;
}
bitmap_or(&extent_bitmap, &extent_bitmap, &has_extent,
stripe->nr_sectors);
}
/* Now we can check and regenerate the P/Q stripe. */
- ret = scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
- &extent_bitmap);
-out:
- btrfs_release_path(&extent_path);
- btrfs_release_path(&csum_path);
- return ret;
+ return scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
+ &extent_bitmap);
}
/*