while (n) {
cache = rb_entry(n, struct btrfs_block_group, cache_node);
- end = cache->start + cache->length - 1;
+ end = btrfs_block_group_end(cache) - 1;
start = cache->start;
if (bytenr < start) {
/* If our block group was removed, we need a full search. */
if (RB_EMPTY_NODE(&cache->cache_node)) {
- const u64 next_bytenr = cache->start + cache->length;
+ const u64 next_bytenr = btrfs_block_group_end(cache);
read_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
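
Every hunk below leans on the btrfs_block_group_end() helper from fs/btrfs/block-group.h. For reference, it presumably looks like the sketch below; the const parameter is an assumption here, needed so that const call sites such as btrfs_free_excluded_extents() further down compile cleanly:

/*
 * Presumed shape of the helper (fs/btrfs/block-group.h): returns the
 * exclusive end of a block group, i.e. the first byte past its range.
 */
static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group)
{
	return block_group->start + block_group->length;
}
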
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *extent_root;
u64 search_offset;
- u64 search_end = block_group->start + block_group->length;
+ const u64 search_end = btrfs_block_group_end(block_group);
BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key search_key;
int ret = 0;
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group *block_group = caching_ctl->block_group;
+ const u64 block_group_end = btrfs_block_group_end(block_group);
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *extent_root;
BTRFS_PATH_AUTO_FREE(path);
continue;
}
- if (key.objectid >= block_group->start + block_group->length)
+ if (key.objectid >= block_group_end)
break;
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
path->slots[0]++;
}
- ret = btrfs_add_new_free_space(block_group, last,
- block_group->start + block_group->length,
- NULL);
+ ret = btrfs_add_new_free_space(block_group, last, block_group_end, NULL);
out:
return ret;
}
static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start,
- bg->start + bg->length - 1, EXTENT_DIRTY, NULL);
+ btrfs_block_group_end(bg) - 1, EXTENT_DIRTY, NULL);
}
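
Both forms of the helper show up throughout, so the boundary convention is worth spelling out once (illustrative comment, not part of the patch):

/*
 * Boundary conventions assumed by these conversions:
 *   btrfs_block_group_end(bg)      - exclusive end (first byte past bg)
 *   btrfs_block_group_end(bg) - 1  - inclusive last byte, which is what
 *                                    extent-io-tree APIs such as the
 *                                    btrfs_clear_extent_bit() call above expect
 */
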
static noinline void caching_thread(struct btrfs_work *work)
while (nr--) {
u64 len = min_t(u64, stripe_len,
- cache->start + cache->length - logical[nr]);
+ btrfs_block_group_end(cache) - logical[nr]);
cache->bytes_super += len;
ret = btrfs_set_extent_bit(&fs_info->excluded_extents,
} else if (cache->used == 0 && cache->remap_bytes == 0) {
cache->cached = BTRFS_CACHE_FINISHED;
ret = btrfs_add_new_free_space(cache, cache->start,
- cache->start + cache->length, NULL);
+ btrfs_block_group_end(cache), NULL);
btrfs_free_excluded_extents(cache);
if (ret)
goto error;
return -ENOENT;
/* An extent cannot span multiple block groups. */
- ASSERT(bytenr + num_bytes <= cache->start + cache->length);
+ ASSERT(bytenr + num_bytes <= btrfs_block_group_end(cache));
space_info = cache->space_info;
factor = btrfs_bg_type_to_factor(cache->flags);
u64 len;
bool readonly;
- if (!cache ||
- start >= cache->start + cache->length) {
+ if (!cache || start >= btrfs_block_group_end(cache)) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
empty_cluster <<= 1;
}
- len = cache->start + cache->length - start;
+ len = btrfs_block_group_end(cache) - start;
len = min(len, end + 1 - start);
if (return_free_space)
/* move on to the next group */
if (ffe_ctl->search_start + ffe_ctl->num_bytes >
- block_group->start + block_group->length) {
+ btrfs_block_group_end(block_group)) {
btrfs_add_free_space_unused(block_group,
ffe_ctl->found_offset,
ffe_ctl->num_bytes);
}
start = max(range->start, cache->start);
- end = min(range_end, cache->start + cache->length);
+ end = min(range_end, btrfs_block_group_end(cache));
if (end - start >= range->minlen) {
if (!btrfs_block_group_done(cache)) {
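
The two clamps above intersect the caller's trim range with the block group; a worked example with made-up numbers (comment only, not part of the patch):

/*
 * Example: range = [0, 10 GiB), block group = [2 GiB, 3 GiB).
 *   start = max(0, 2 GiB)      = 2 GiB
 *   end   = min(10 GiB, 3 GiB) = 3 GiB
 * The resulting 1 GiB span is trimmed only if it is >= range->minlen.
 */
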
int *entries)
{
u64 start, extent_start, extent_end, len;
+ const u64 block_group_end = btrfs_block_group_end(block_group);
struct extent_io_tree *unpin = NULL;
int ret;
start = block_group->start;
- while (start < block_group->start + block_group->length) {
+ while (start < block_group_end) {
if (!btrfs_find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL))
return 0;
/* This pinned extent is out of our range */
- if (extent_start >= block_group->start + block_group->length)
+ if (extent_start >= block_group_end)
return 0;
extent_start = max(extent_start, start);
- extent_end = min(block_group->start + block_group->length,
- extent_end + 1);
+ extent_end = min(block_group_end, extent_end + 1);
len = extent_end - extent_start;
*entries += 1;
return 0;
start = block_group->start;
- end = block_group->start + block_group->length;
+ end = btrfs_block_group_end(block_group);
key.objectid = end - 1;
key.type = (u8)-1;
return 0;
start = block_group->start;
- end = block_group->start + block_group->length;
+ end = btrfs_block_group_end(block_group);
key.objectid = end - 1;
key.type = (u8)-1;
* Read the bit for the block immediately after the extent of space if
* that block is within the block group.
*/
- if (end < block_group->start + block_group->length) {
+ if (end < btrfs_block_group_end(block_group)) {
/* The next block may be in the next bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (end >= key.objectid + key.offset) {
right:
/* Search for a neighbor on the right. */
- if (end == block_group->start + block_group->length)
+ if (end == btrfs_block_group_end(block_group))
goto insert;
key.objectid = end;
key.type = (u8)-1;
* highest, block group).
*/
start = block_group->start;
- end = block_group->start + block_group->length;
+ end = btrfs_block_group_end(block_group);
while (ret == 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
}
start = block_group->start;
- end = block_group->start + block_group->length;
+ end = btrfs_block_group_end(block_group);
key.objectid = end - 1;
key.type = (u8)-1;
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group *block_group;
- struct btrfs_fs_info *fs_info;
+ struct btrfs_block_group *block_group = caching_ctl->block_group;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *root;
struct btrfs_key key;
bool prev_bit_set = false;
/* Initialize to silence GCC. */
u64 extent_start = 0;
- u64 end, offset;
+ const u64 end = btrfs_block_group_end(block_group);
+ u64 offset;
u64 total_found = 0;
u32 extent_count = 0;
int ret;
- block_group = caching_ctl->block_group;
- fs_info = block_group->fs_info;
root = btrfs_free_space_root(block_group);
- end = block_group->start + block_group->length;
-
while (1) {
ret = btrfs_next_item(root, path);
if (ret < 0)
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group *block_group;
- struct btrfs_fs_info *fs_info;
+ struct btrfs_block_group *block_group = caching_ctl->block_group;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *root;
struct btrfs_key key;
- u64 end;
+ const u64 end = btrfs_block_group_end(block_group);
u64 total_found = 0;
u32 extent_count = 0;
int ret;
- block_group = caching_ctl->block_group;
- fs_info = block_group->fs_info;
root = btrfs_free_space_root(block_group);
- end = block_group->start + block_group->length;
-
while (1) {
u64 space_added;
scrub_stripe_reset_bitmaps(stripe);
/* The range must be inside the bg. */
- ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length,
+ ASSERT(logical_start >= bg->start && logical_end <= btrfs_block_group_end(bg),
"bg->start=%llu logical_start=%llu logical_end=%llu end=%llu",
- bg->start, logical_start, logical_end, bg->start + bg->length);
+ bg->start, logical_start, logical_end, btrfs_block_group_end(bg));
ret = find_first_extent_item(extent_root, extent_path, logical_start,
logical_len);
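
The two-argument ASSERT() above assumes the btrfs assert flavor that accepts an optional format string, available in recent trees; on a tree whose ASSERT() takes only the condition, an equivalent would be along these lines (sketch, assumption):

/* Fallback sketch for trees whose ASSERT() takes only a condition: */
if (unlikely(logical_start < bg->start ||
	     logical_end > btrfs_block_group_end(bg)))
	btrfs_err(bg->fs_info,
		  "bg->start=%llu logical_start=%llu logical_end=%llu end=%llu",
		  bg->start, logical_start, logical_end,
		  btrfs_block_group_end(bg));
ASSERT(logical_start >= bg->start && logical_end <= btrfs_block_group_end(bg));
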
int ret = 0;
/* The range must be inside the bg */
- ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+ ASSERT(logical_start >= bg->start && logical_end <= btrfs_block_group_end(bg));
/* Go through each extent item inside the logical range */
while (cur_logical < logical_end) {
const u64 logical_increment = simple_stripe_full_stripe_len(map);
const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
const u64 orig_physical = map->stripes[stripe_index].physical;
+ const u64 end = btrfs_block_group_end(bg);
const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
u64 cur_logical = orig_logical;
u64 cur_physical = orig_physical;
int ret = 0;
- while (cur_logical < bg->start + bg->length) {
+ while (cur_logical < end) {
/*
* Inside each stripe, RAID0 is just SINGLE, and RAID10 is
* just RAID1, so we can reuse scrub_simple_mirror() to scrub
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
if (path->slots[0] != 0)
goto invalid;
- end = cache->start + cache->length;
+ end = btrfs_block_group_end(cache);
i = 0;
while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
int ret;
ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
- cache->start + cache->length - alignment,
+ btrfs_block_group_end(cache) - alignment,
alignment);
if (ret) {
test_err("could not remove free space");
BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
+ const u64 bg_end = btrfs_block_group_end(cache);
int ret;
u64 length;
if (!path)
return -ENOMEM;
- key.objectid = cache->start + cache->length;
+ key.objectid = bg_end;
key.type = 0;
key.offset = 0;
length = fs_info->nodesize;
if (unlikely(!(found_key.objectid >= cache->start &&
- found_key.objectid + length <= cache->start + cache->length))) {
+ found_key.objectid + length <= bg_end))) {
return -EUCLEAN;
}
*offset_ret = found_key.objectid + length - cache->start;
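
For intuition on the *offset_ret computation above, an example with made-up numbers (comment only, not part of the patch):

/*
 * Example: cache->start = 1 GiB, last item at
 * found_key.objectid = 1 GiB + 768 KiB, length = nodesize = 16 KiB:
 *   *offset_ret = (1 GiB + 768 KiB) + 16 KiB - 1 GiB = 784 KiB,
 * i.e. the allocation offset relative to the block group start. The
 * EUCLEAN check above guarantees this never exceeds cache->length.
 */
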
if (block_group) {
if (block_group->start > eb->start ||
- block_group->start + block_group->length <= eb->start) {
+ btrfs_block_group_end(block_group) <= eb->start) {
btrfs_put_block_group(block_group);
block_group = NULL;
ctx->zoned_bg = NULL;
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- const u64 end = block_group->start + block_group->length;
+ const u64 end = btrfs_block_group_end(block_group);
struct extent_buffer *eb;
unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);