// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* Pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

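/*
 * Fold the currently available allocation profile bits for the given chunk
 * type into @orig_flags (under the profiles seqlock) and reduce the result
 * to a single profile in chunk format.
 */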
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

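/* Reference counting helpers for block groups; the last put frees the structure. */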
void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding the mutex of
		 * full_stripe_lock, which can only be released by the caller.
		 * That will definitely cause a use-after-free when the caller
		 * tries to release the full stripe lock.
		 *
		 * There is no better way to resolve this, so just warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

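/*
 * Return the block group following @cache in the rbtree, dropping the
 * caller's reference on @cache and taking one on the returned group (NULL
 * when @cache was the last one). If @cache was removed from the tree in the
 * meantime, fall back to a lookup by the next bytenr.
 */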
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

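/*
 * Try to take a NOCOW writer reference on the block group containing
 * @bytenr. Returns false (and takes no reference) if the group is read only
 * or cannot be found.
 */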
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

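/*
 * Drop a NOCOW writer reference taken by btrfs_inc_nocow_writers() and wake
 * up any waiter once the count reaches zero.
 */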
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the group's semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

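/*
 * Return the caching control of @cache with an extra reference taken, or
 * NULL if no caching is in progress.
 */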
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

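/*
 * Wait until caching of the block group has finished and return -EIO if it
 * ended in error.
 */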
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

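/*
 * Debug helper: knock out every other chunk-sized range of free space so the
 * allocator has to cope with a heavily fragmented block group.
 */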
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group(). Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet because their free space will be released as soon as the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

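/*
 * Walk the extent tree (using the commit root) for the caching control's
 * block group and record the gaps between extent items as free space.
 */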
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

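/*
 * Background worker that fills in the free space for a block group, either
 * from the free space tree or by scanning the extent tree, and marks the
 * group finished (or errored) when done.
 */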
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

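/*
 * Start loading the free space for @cache, either from the on-disk space
 * cache, the free space tree or by scanning the extent tree in a background
 * worker. With @load_cache_only set, only the fast on-disk cache load is
 * attempted and no background caching is started.
 */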
int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info. The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching block
	 * groups we need to first check if we're doing a fast load here, so
	 * we can wait for it to finish; otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

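/*
 * Remove the extended profile bits of @flags from the available allocation
 * bits of the corresponding chunk type.
 */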
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

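/*
 * Remove the block group starting at @group_start: drop it from the rbtree
 * and the space_info lists, delete its free space inode/item and block group
 * item, and, when no trimming is pending on it, remove the chunk's extent
 * map @em from the mapping tree.
 */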
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	spin_unlock(&block_group->lock);

	mutex_unlock(&fs_info->chunk_mutex);

	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}
out:
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}

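/*
 * Start a transaction with enough metadata units reserved to remove the
 * block group / chunk at @chunk_offset; see the reservation breakdown in the
 * comment inside.
 */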
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    block_group->used || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group. We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->start;
		end = start + block_group->length - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can lookup for the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations. However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
					 -block_group->pinned,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
	return;

flip_async:
	btrfs_end_transaction(trans);
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}

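/*
 * Queue @bg on the fs_info->unused_bgs list (taking a reference) so that
 * btrfs_delete_unused_bgs() can consider removing it.
 */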
void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

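/*
 * Search for the first block group item at or after @key->objectid and
 * verify it against the chunk mapping; on success the path points at the
 * found item.
 */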
static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bg;
	u64 flags;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;

			em_tree = &root->fs_info->mapping_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else if (em->start != found_key.objectid ||
				   em->len != found_key.offset) {
				btrfs_err(fs_info,
		"block group %llu len %llu mismatch with chunk %llu len %llu",
					  found_key.objectid, found_key.offset,
					  em->start, em->len);
				ret = -EUCLEAN;
			} else {
				read_extent_buffer(leaf, &bg,
					btrfs_item_ptr_offset(leaf, slot),
					sizeof(bg));
				flags = btrfs_stack_block_group_flags(&bg) &
					BTRFS_BLOCK_GROUP_TYPE_MASK;

				if (flags != (em->map_lookup->type &
					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
					btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
						found_key.objectid,
						found_key.offset, flags,
						(BTRFS_BLOCK_GROUP_TYPE_MASK &
						 em->map_lookup->type));
					ret = -EUCLEAN;
				} else {
					ret = 0;
				}
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

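/*
 * Record the extended profile bits of @flags as available allocation bits
 * for the corresponding chunk type.
 */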
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/**
 * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
 * @chunk_start: logical address of block group
 * @physical: physical address to map to logical addresses
 * @logical: return array of logical addresses which map to @physical
 * @naddrs: length of @logical
 * @stripe_len: size of IO stripe for the given block group
 *
 * Maps a particular @physical disk address to a list of @logical addresses.
 * Used primarily to exclude those portions of a block group that contain super
 * block copies.
 */
EXPORT_FOR_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 data_stripe_length;
	u64 io_stripe_size;
	int i, nr = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	data_stripe_length = em->len;
	io_stripe_size = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		data_stripe_length = div_u64(data_stripe_length,
					     map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		data_stripe_length = div_u64(data_stripe_length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		data_stripe_length = div_u64(data_stripe_length,
					     nr_data_stripes(map));
		io_stripe_size = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool already_inserted = false;
		u64 stripe_nr;
		int j;

		if (!in_range(physical, map->stripes[i].physical,
			      data_stripe_length))
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * The remaining case would be for RAID56, multiply by
		 * nr_data_stripes(). Alternatively, just use rmap_len below
		 * instead of map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * io_stripe_size;

		/* Ensure we don't add duplicate addresses */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr) {
				already_inserted = true;
				break;
			}
		}

		if (!already_inserted)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = io_stripe_size;
out:
	free_extent_map(em);
	return ret;
}

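/*
 * Exclude the portions of the block group that hold superblock copies (and
 * anything below BTRFS_SUPER_INFO_OFFSET) from free space accounting.
 */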
static int exclude_super_stripes(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
		cache->bytes_super += stripe_len;
		ret = btrfs_add_excluded_extent(fs_info, cache->start,
						stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->start,
				       bytenr, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->start + cache->length)
				continue;

			if (logical[nr] + stripe_len <= cache->start)
				continue;

			start = logical[nr];
			if (start < cache->start) {
				start = cache->start;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->start + cache->length - start);
			}

			cache->bytes_super += len;
			ret = btrfs_add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

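/*
 * Add the block group to its space_info's per-RAID-index list, creating the
 * sysfs entry for the RAID type when this is the first group of that type.
 */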
static void link_block_group(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = btrfs_bg_flags_to_raid_index(cache->flags);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first)
		btrfs_sysfs_add_block_group_type(cache);
}

32da5386 1747static struct btrfs_block_group *btrfs_create_block_group_cache(
4358d963
JB
1748 struct btrfs_fs_info *fs_info, u64 start, u64 size)
1749{
32da5386 1750 struct btrfs_block_group *cache;
4358d963
JB
1751
1752 cache = kzalloc(sizeof(*cache), GFP_NOFS);
1753 if (!cache)
1754 return NULL;
1755
1756 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1757 GFP_NOFS);
1758 if (!cache->free_space_ctl) {
1759 kfree(cache);
1760 return NULL;
1761 }
1762
b3470b5d
DS
1763 cache->start = start;
1764 cache->length = size;
4358d963
JB
1765
1766 cache->fs_info = fs_info;
1767 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1768 set_free_space_tree_thresholds(cache);
1769
6e80d4f8
DZ
1770 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
1771
4358d963
JB
1772 atomic_set(&cache->count, 1);
1773 spin_lock_init(&cache->lock);
1774 init_rwsem(&cache->data_rwsem);
1775 INIT_LIST_HEAD(&cache->list);
1776 INIT_LIST_HEAD(&cache->cluster_list);
1777 INIT_LIST_HEAD(&cache->bg_list);
1778 INIT_LIST_HEAD(&cache->ro_list);
b0643e59 1779 INIT_LIST_HEAD(&cache->discard_list);
4358d963
JB
1780 INIT_LIST_HEAD(&cache->dirty_list);
1781 INIT_LIST_HEAD(&cache->io_list);
1782 btrfs_init_free_space_ctl(cache);
1783 atomic_set(&cache->trimming, 0);
1784 mutex_init(&cache->free_space_lock);
1785 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1786
1787 return cache;
1788}
1789
1790/*
1791 * Iterate all chunks and verify that each of them has the corresponding block
1792 * group
1793 */
1794static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1795{
1796 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1797 struct extent_map *em;
32da5386 1798 struct btrfs_block_group *bg;
4358d963
JB
1799 u64 start = 0;
1800 int ret = 0;
1801
1802 while (1) {
1803 read_lock(&map_tree->lock);
1804 /*
1805 * lookup_extent_mapping will return the first extent map
1806 * intersecting the range, so setting @len to 1 is enough to
1807 * get the first chunk.
1808 */
1809 em = lookup_extent_mapping(map_tree, start, 1);
1810 read_unlock(&map_tree->lock);
1811 if (!em)
1812 break;
1813
1814 bg = btrfs_lookup_block_group(fs_info, em->start);
1815 if (!bg) {
1816 btrfs_err(fs_info,
1817 "chunk start=%llu len=%llu doesn't have corresponding block group",
1818 em->start, em->len);
1819 ret = -EUCLEAN;
1820 free_extent_map(em);
1821 break;
1822 }
b3470b5d 1823 if (bg->start != em->start || bg->length != em->len ||
4358d963
JB
1824 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1825 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1826 btrfs_err(fs_info,
1827"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1828 em->start, em->len,
1829 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
b3470b5d 1830 bg->start, bg->length,
4358d963
JB
1831 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1832 ret = -EUCLEAN;
1833 free_extent_map(em);
1834 btrfs_put_block_group(bg);
1835 break;
1836 }
1837 start = em->start + em->len;
1838 free_extent_map(em);
1839 btrfs_put_block_group(bg);
1840 }
1841 return ret;
1842}
1843
ffb9e0f0
QW
1844static int read_one_block_group(struct btrfs_fs_info *info,
1845 struct btrfs_path *path,
d49a2ddb 1846 const struct btrfs_key *key,
ffb9e0f0
QW
1847 int need_clear)
1848{
1849 struct extent_buffer *leaf = path->nodes[0];
32da5386 1850 struct btrfs_block_group *cache;
ffb9e0f0 1851 struct btrfs_space_info *space_info;
ffb9e0f0
QW
1852 struct btrfs_block_group_item bgi;
1853 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
1854 int slot = path->slots[0];
1855 int ret;
1856
d49a2ddb 1857 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
ffb9e0f0 1858
d49a2ddb 1859 cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
ffb9e0f0
QW
1860 if (!cache)
1861 return -ENOMEM;
1862
1863 if (need_clear) {
1864 /*
1865 * When we mount with an old space cache, we need to
1866 * set BTRFS_DC_CLEAR and set the dirty flag.
1867 *
1868 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1869 * truncate the old free space cache inode and
1870 * set up a new one.
1871 * b) Setting the 'dirty' flag makes sure that we flush
1872 * the new space cache info onto disk.
1873 */
1874 if (btrfs_test_opt(info, SPACE_CACHE))
1875 cache->disk_cache_state = BTRFS_DC_CLEAR;
1876 }
1877 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
1878 sizeof(bgi));
1879 cache->used = btrfs_stack_block_group_used(&bgi);
1880 cache->flags = btrfs_stack_block_group_flags(&bgi);
1881 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1882 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1883 btrfs_err(info,
1884"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1885 cache->start);
1886 ret = -EINVAL;
1887 goto error;
1888 }
1889
1890 /*
1891 * We need to exclude the super stripes now so that the space info has
1892 * super bytes accounted for, otherwise we'll think we have more space
1893 * than we actually do.
1894 */
1895 ret = exclude_super_stripes(cache);
1896 if (ret) {
1897 /* We may have excluded something, so call this just in case. */
1898 btrfs_free_excluded_extents(cache);
1899 goto error;
1900 }
1901
1902 /*
1903 * Check for two cases, either we are full, and therefore don't need
1904 * to bother with the caching work since we won't find any space, or we
1905 * are empty, and we can just add all the space in and be done with it.
1906 * This saves us _a_lot_ of time, particularly in the full case.
1907 */
d49a2ddb 1908 if (key->offset == cache->used) {
ffb9e0f0
QW
1909 cache->last_byte_to_unpin = (u64)-1;
1910 cache->cached = BTRFS_CACHE_FINISHED;
1911 btrfs_free_excluded_extents(cache);
1912 } else if (cache->used == 0) {
1913 cache->last_byte_to_unpin = (u64)-1;
1914 cache->cached = BTRFS_CACHE_FINISHED;
d49a2ddb
QW
1915 add_new_free_space(cache, key->objectid,
1916 key->objectid + key->offset);
ffb9e0f0
QW
1917 btrfs_free_excluded_extents(cache);
1918 }
1919
1920 ret = btrfs_add_block_group_cache(info, cache);
1921 if (ret) {
1922 btrfs_remove_free_space_cache(cache);
1923 goto error;
1924 }
1925 trace_btrfs_add_block_group(info, cache, 0);
d49a2ddb 1926 btrfs_update_space_info(info, cache->flags, key->offset,
ffb9e0f0
QW
1927 cache->used, cache->bytes_super, &space_info);
1928
1929 cache->space_info = space_info;
1930
1931 link_block_group(cache);
1932
1933 set_avail_alloc_bits(info, cache->flags);
1934 if (btrfs_chunk_readonly(info, cache->start)) {
1935 inc_block_group_ro(cache, 1);
1936 } else if (cache->used == 0) {
1937 ASSERT(list_empty(&cache->bg_list));
6e80d4f8
DZ
1938 if (btrfs_test_opt(info, DISCARD_ASYNC))
1939 btrfs_discard_queue_work(&info->discard_ctl, cache);
1940 else
1941 btrfs_mark_bg_unused(cache);
ffb9e0f0
QW
1942 }
1943 return 0;
1944error:
1945 btrfs_put_block_group(cache);
1946 return ret;
1947}
1948
4358d963
JB
1949int btrfs_read_block_groups(struct btrfs_fs_info *info)
1950{
1951 struct btrfs_path *path;
1952 int ret;
32da5386 1953 struct btrfs_block_group *cache;
4358d963
JB
1954 struct btrfs_space_info *space_info;
1955 struct btrfs_key key;
4358d963
JB
1956 int need_clear = 0;
1957 u64 cache_gen;
4358d963
JB
1958
1959 key.objectid = 0;
1960 key.offset = 0;
1961 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1962 path = btrfs_alloc_path();
1963 if (!path)
1964 return -ENOMEM;
1965 path->reada = READA_FORWARD;
1966
1967 cache_gen = btrfs_super_cache_generation(info->super_copy);
1968 if (btrfs_test_opt(info, SPACE_CACHE) &&
1969 btrfs_super_generation(info->super_copy) != cache_gen)
1970 need_clear = 1;
1971 if (btrfs_test_opt(info, CLEAR_CACHE))
1972 need_clear = 1;
1973
1974 while (1) {
1975 ret = find_first_block_group(info, path, &key);
1976 if (ret > 0)
1977 break;
1978 if (ret != 0)
1979 goto error;
1980
ffb9e0f0 1981 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
d49a2ddb 1982 ret = read_one_block_group(info, path, &key, need_clear);
ffb9e0f0 1983 if (ret < 0)
4358d963 1984 goto error;
ffb9e0f0
QW
1985 key.objectid += key.offset;
1986 key.offset = 0;
4358d963 1987 btrfs_release_path(path);
4358d963
JB
1988 }
1989
1990 list_for_each_entry_rcu(space_info, &info->space_info, list) {
1991 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1992 (BTRFS_BLOCK_GROUP_RAID10 |
1993 BTRFS_BLOCK_GROUP_RAID1_MASK |
1994 BTRFS_BLOCK_GROUP_RAID56_MASK |
1995 BTRFS_BLOCK_GROUP_DUP)))
1996 continue;
1997 /*
1998 * Avoid allocating from un-mirrored block group if there are
1999 * mirrored block groups.
2000 */
2001 list_for_each_entry(cache,
2002 &space_info->block_groups[BTRFS_RAID_RAID0],
2003 list)
e11c0406 2004 inc_block_group_ro(cache, 1);
4358d963
JB
2005 list_for_each_entry(cache,
2006 &space_info->block_groups[BTRFS_RAID_SINGLE],
2007 list)
e11c0406 2008 inc_block_group_ro(cache, 1);
4358d963
JB
2009 }
2010
2011 btrfs_init_global_block_rsv(info);
2012 ret = check_chunk_block_group_mappings(info);
2013error:
2014 btrfs_free_path(path);
2015 return ret;
2016}
2017
2018void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2019{
2020 struct btrfs_fs_info *fs_info = trans->fs_info;
32da5386 2021 struct btrfs_block_group *block_group;
4358d963
JB
2022 struct btrfs_root *extent_root = fs_info->extent_root;
2023 struct btrfs_block_group_item item;
2024 struct btrfs_key key;
2025 int ret = 0;
2026
2027 if (!trans->can_flush_pending_bgs)
2028 return;
2029
2030 while (!list_empty(&trans->new_bgs)) {
2031 block_group = list_first_entry(&trans->new_bgs,
32da5386 2032 struct btrfs_block_group,
4358d963
JB
2033 bg_list);
2034 if (ret)
2035 goto next;
2036
2037 spin_lock(&block_group->lock);
de0dc456
DS
2038 btrfs_set_stack_block_group_used(&item, block_group->used);
2039 btrfs_set_stack_block_group_chunk_objectid(&item,
3d976388 2040 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
de0dc456 2041 btrfs_set_stack_block_group_flags(&item, block_group->flags);
b3470b5d
DS
2042 key.objectid = block_group->start;
2043 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2044 key.offset = block_group->length;
4358d963
JB
2045 spin_unlock(&block_group->lock);
2046
2047 ret = btrfs_insert_item(trans, extent_root, &key, &item,
2048 sizeof(item));
2049 if (ret)
2050 btrfs_abort_transaction(trans, ret);
2051 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
2052 if (ret)
2053 btrfs_abort_transaction(trans, ret);
2054 add_block_group_free_space(trans, block_group);
2055 /* Already aborted the transaction if it failed. */
2056next:
2057 btrfs_delayed_refs_rsv_release(fs_info, 1);
2058 list_del_init(&block_group->bg_list);
2059 }
2060 btrfs_trans_release_chunk_metadata(trans);
2061}
2062
2063int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
2064 u64 type, u64 chunk_offset, u64 size)
2065{
2066 struct btrfs_fs_info *fs_info = trans->fs_info;
32da5386 2067 struct btrfs_block_group *cache;
4358d963
JB
2068 int ret;
2069
2070 btrfs_set_log_full_commit(trans);
2071
2072 cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
2073 if (!cache)
2074 return -ENOMEM;
2075
bf38be65 2076 cache->used = bytes_used;
4358d963
JB
2077 cache->flags = type;
2078 cache->last_byte_to_unpin = (u64)-1;
2079 cache->cached = BTRFS_CACHE_FINISHED;
2080 cache->needs_free_space = 1;
2081 ret = exclude_super_stripes(cache);
2082 if (ret) {
2083 /* We may have excluded something, so call this just in case */
2084 btrfs_free_excluded_extents(cache);
2085 btrfs_put_block_group(cache);
2086 return ret;
2087 }
2088
2089 add_new_free_space(cache, chunk_offset, chunk_offset + size);
2090
2091 btrfs_free_excluded_extents(cache);
2092
2093#ifdef CONFIG_BTRFS_DEBUG
2094 if (btrfs_should_fragment_free_space(cache)) {
2095 u64 new_bytes_used = size - bytes_used;
2096
2097 bytes_used += new_bytes_used >> 1;
e11c0406 2098 fragment_free_space(cache);
4358d963
JB
2099 }
2100#endif
2101 /*
2102 * Ensure the corresponding space_info object is created and
2103 * assigned to our block group. We want our bg to be added to the rbtree
2104 * with its ->space_info set.
2105 */
2106 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2107 ASSERT(cache->space_info);
2108
2109 ret = btrfs_add_block_group_cache(fs_info, cache);
2110 if (ret) {
2111 btrfs_remove_free_space_cache(cache);
2112 btrfs_put_block_group(cache);
2113 return ret;
2114 }
2115
2116 /*
2117 * Now that our block group has its ->space_info set and is inserted in
2118 * the rbtree, update the space info's counters.
2119 */
2120 trace_btrfs_add_block_group(fs_info, cache, 1);
2121 btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
2122 cache->bytes_super, &cache->space_info);
2123 btrfs_update_global_block_rsv(fs_info);
2124
2125 link_block_group(cache);
2126
2127 list_add_tail(&cache->bg_list, &trans->new_bgs);
2128 trans->delayed_ref_updates++;
2129 btrfs_update_delayed_refs_rsv(trans);
2130
2131 set_avail_alloc_bits(fs_info, type);
2132 return 0;
2133}
26ce2095
JB
2134
2135static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
2136{
2137 u64 num_devices;
2138 u64 stripped;
2139
2140 /*
2141 * If restripe for this chunk_type is on, pick the target profile and
2142 * return; otherwise do the usual balance.
2143 */
e11c0406 2144 stripped = get_restripe_target(fs_info, flags);
26ce2095
JB
2145 if (stripped)
2146 return extended_to_chunk(stripped);
2147
2148 num_devices = fs_info->fs_devices->rw_devices;
2149
2150 stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
2151 BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
2152
2153 if (num_devices == 1) {
2154 stripped |= BTRFS_BLOCK_GROUP_DUP;
2155 stripped = flags & ~stripped;
2156
2157 /* turn raid0 into single device chunks */
2158 if (flags & BTRFS_BLOCK_GROUP_RAID0)
2159 return stripped;
2160
2161 /* turn mirroring into duplication */
2162 if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2163 BTRFS_BLOCK_GROUP_RAID10))
2164 return stripped | BTRFS_BLOCK_GROUP_DUP;
2165 } else {
2166 /* they already had raid on here, just return */
2167 if (flags & stripped)
2168 return flags;
2169
2170 stripped |= BTRFS_BLOCK_GROUP_DUP;
2171 stripped = flags & ~stripped;
2172
2173 /* switch duplicated blocks with raid1 */
2174 if (flags & BTRFS_BLOCK_GROUP_DUP)
2175 return stripped | BTRFS_BLOCK_GROUP_RAID1;
2176
2177 /* this is drive concat, leave it alone */
2178 }
2179
2180 return flags;
2181}
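/*
 * Editor's note -- illustrative examples, not part of the original file,
 * of what update_block_group_flags() above returns:
 *
 *   1 rw device:   RAID1/RAID10 -> DUP, RAID0 -> single profile
 *   >1 rw devices: DUP -> RAID1, any existing RAID profile is kept as is
 */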
2182
b12de528
QW
2183/*
2184 * Mark one block group RO; this can be called several times for the same block
2185 * group.
2186 *
2187 * @cache: the destination block group
2188 * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to
2189 * ensure we still have some free space after marking this
2190 * block group RO.
2191 */
2192int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2193 bool do_chunk_alloc)
26ce2095
JB
2194{
2195 struct btrfs_fs_info *fs_info = cache->fs_info;
2196 struct btrfs_trans_handle *trans;
2197 u64 alloc_flags;
2198 int ret;
2199
2200again:
2201 trans = btrfs_join_transaction(fs_info->extent_root);
2202 if (IS_ERR(trans))
2203 return PTR_ERR(trans);
2204
2205 /*
2206 * we're not allowed to set block groups readonly after the dirty
2207 * block groups cache has started writing. If it already started,
2208 * back off and let this transaction commit
2209 */
2210 mutex_lock(&fs_info->ro_block_group_mutex);
2211 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2212 u64 transid = trans->transid;
2213
2214 mutex_unlock(&fs_info->ro_block_group_mutex);
2215 btrfs_end_transaction(trans);
2216
2217 ret = btrfs_wait_for_commit(fs_info, transid);
2218 if (ret)
2219 return ret;
2220 goto again;
2221 }
2222
b12de528 2223 if (do_chunk_alloc) {
26ce2095 2224 /*
b12de528
QW
2225 * If we are changing raid levels, try to allocate a
2226 * corresponding block group with the new raid level.
26ce2095 2227 */
b12de528
QW
2228 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2229 if (alloc_flags != cache->flags) {
2230 ret = btrfs_chunk_alloc(trans, alloc_flags,
2231 CHUNK_ALLOC_FORCE);
2232 /*
2233 * ENOSPC is allowed here, we may have enough space
2234 * already allocated at the new raid level to carry on
2235 */
2236 if (ret == -ENOSPC)
2237 ret = 0;
2238 if (ret < 0)
2239 goto out;
2240 }
26ce2095
JB
2241 }
2242
a7a63acc 2243 ret = inc_block_group_ro(cache, 0);
b12de528
QW
2244 if (!do_chunk_alloc)
2245 goto unlock_out;
26ce2095
JB
2246 if (!ret)
2247 goto out;
2248 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2249 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2250 if (ret < 0)
2251 goto out;
e11c0406 2252 ret = inc_block_group_ro(cache, 0);
26ce2095
JB
2253out:
2254 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2255 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2256 mutex_lock(&fs_info->chunk_mutex);
2257 check_system_chunk(trans, alloc_flags);
2258 mutex_unlock(&fs_info->chunk_mutex);
2259 }
b12de528 2260unlock_out:
26ce2095
JB
2261 mutex_unlock(&fs_info->ro_block_group_mutex);
2262
2263 btrfs_end_transaction(trans);
2264 return ret;
2265}
2266
32da5386 2267void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
26ce2095
JB
2268{
2269 struct btrfs_space_info *sinfo = cache->space_info;
2270 u64 num_bytes;
2271
2272 BUG_ON(!cache->ro);
2273
2274 spin_lock(&sinfo->lock);
2275 spin_lock(&cache->lock);
2276 if (!--cache->ro) {
b3470b5d 2277 num_bytes = cache->length - cache->reserved -
bf38be65 2278 cache->pinned - cache->bytes_super - cache->used;
26ce2095
JB
2279 sinfo->bytes_readonly -= num_bytes;
2280 list_del_init(&cache->ro_list);
2281 }
2282 spin_unlock(&cache->lock);
2283 spin_unlock(&sinfo->lock);
2284}
77745c05
JB
2285
2286static int write_one_cache_group(struct btrfs_trans_handle *trans,
2287 struct btrfs_path *path,
32da5386 2288 struct btrfs_block_group *cache)
77745c05
JB
2289{
2290 struct btrfs_fs_info *fs_info = trans->fs_info;
2291 int ret;
2292 struct btrfs_root *extent_root = fs_info->extent_root;
2293 unsigned long bi;
2294 struct extent_buffer *leaf;
bf38be65 2295 struct btrfs_block_group_item bgi;
b3470b5d
DS
2296 struct btrfs_key key;
2297
2298 key.objectid = cache->start;
2299 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2300 key.offset = cache->length;
77745c05 2301
b3470b5d 2302 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
77745c05
JB
2303 if (ret) {
2304 if (ret > 0)
2305 ret = -ENOENT;
2306 goto fail;
2307 }
2308
2309 leaf = path->nodes[0];
2310 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
de0dc456
DS
2311 btrfs_set_stack_block_group_used(&bgi, cache->used);
2312 btrfs_set_stack_block_group_chunk_objectid(&bgi,
3d976388 2313 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
de0dc456 2314 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
bf38be65 2315 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
77745c05
JB
2316 btrfs_mark_buffer_dirty(leaf);
2317fail:
2318 btrfs_release_path(path);
2319 return ret;
2320
2321}
2322
32da5386 2323static int cache_save_setup(struct btrfs_block_group *block_group,
77745c05
JB
2324 struct btrfs_trans_handle *trans,
2325 struct btrfs_path *path)
2326{
2327 struct btrfs_fs_info *fs_info = block_group->fs_info;
2328 struct btrfs_root *root = fs_info->tree_root;
2329 struct inode *inode = NULL;
2330 struct extent_changeset *data_reserved = NULL;
2331 u64 alloc_hint = 0;
2332 int dcs = BTRFS_DC_ERROR;
2333 u64 num_pages = 0;
2334 int retries = 0;
2335 int ret = 0;
2336
2337 /*
2338 * If this block group is smaller than 100 megs, don't bother caching the
2339 * block group.
2340 */
b3470b5d 2341 if (block_group->length < (100 * SZ_1M)) {
77745c05
JB
2342 spin_lock(&block_group->lock);
2343 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2344 spin_unlock(&block_group->lock);
2345 return 0;
2346 }
2347
2348 if (trans->aborted)
2349 return 0;
2350again:
2351 inode = lookup_free_space_inode(block_group, path);
2352 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2353 ret = PTR_ERR(inode);
2354 btrfs_release_path(path);
2355 goto out;
2356 }
2357
2358 if (IS_ERR(inode)) {
2359 BUG_ON(retries);
2360 retries++;
2361
2362 if (block_group->ro)
2363 goto out_free;
2364
2365 ret = create_free_space_inode(trans, block_group, path);
2366 if (ret)
2367 goto out_free;
2368 goto again;
2369 }
2370
2371 /*
2372 * We want to set the generation to 0, that way if anything goes wrong
2373 * from here on out we know not to trust this cache when we load up next
2374 * time.
2375 */
2376 BTRFS_I(inode)->generation = 0;
2377 ret = btrfs_update_inode(trans, root, inode);
2378 if (ret) {
2379 /*
2380 * So theoretically we could recover from this, simply set the
2381 * super cache generation to 0 so we know to invalidate the
2382 * cache, but then we'd have to keep track of the block groups
2383 * that fail this way so we know we _have_ to reset this cache
2384 * before the next commit or risk reading stale cache. So to
2385 * limit our exposure to horrible edge cases, let's just abort the
2386 * transaction; this only happens in really bad situations
2387 * anyway.
2388 */
2389 btrfs_abort_transaction(trans, ret);
2390 goto out_put;
2391 }
2392 WARN_ON(ret);
2393
2394 /* We've already setup this transaction, go ahead and exit */
2395 if (block_group->cache_generation == trans->transid &&
2396 i_size_read(inode)) {
2397 dcs = BTRFS_DC_SETUP;
2398 goto out_put;
2399 }
2400
2401 if (i_size_read(inode) > 0) {
2402 ret = btrfs_check_trunc_cache_free_space(fs_info,
2403 &fs_info->global_block_rsv);
2404 if (ret)
2405 goto out_put;
2406
2407 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2408 if (ret)
2409 goto out_put;
2410 }
2411
2412 spin_lock(&block_group->lock);
2413 if (block_group->cached != BTRFS_CACHE_FINISHED ||
2414 !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2415 /*
2416 * don't bother trying to write stuff out _if_
2417 * a) we're not cached,
2418 * b) we're using the nospace_cache mount option,
2419 * c) we're using v2 space_cache (FREE_SPACE_TREE).
2420 */
2421 dcs = BTRFS_DC_WRITTEN;
2422 spin_unlock(&block_group->lock);
2423 goto out_put;
2424 }
2425 spin_unlock(&block_group->lock);
2426
2427 /*
2428 * We hit an ENOSPC when setting up the cache in this transaction, just
2429 * skip doing the setup, we've already cleared the cache so we're safe.
2430 */
2431 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2432 ret = -ENOSPC;
2433 goto out_put;
2434 }
2435
2436 /*
2437 * Try to preallocate enough space based on how big the block group is.
2438 * Keep in mind this has to include any pinned space which could end up
2439 * taking up quite a bit since it's not folded into the other space
2440 * cache.
2441 */
b3470b5d 2442 num_pages = div_u64(block_group->length, SZ_256M);
77745c05
JB
2443 if (!num_pages)
2444 num_pages = 1;
2445
2446 num_pages *= 16;
2447 num_pages *= PAGE_SIZE;
2448
2449 ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2450 if (ret)
2451 goto out_put;
2452
2453 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2454 num_pages, num_pages,
2455 &alloc_hint);
2456 /*
2457 * Our cache requires contiguous chunks so that we don't modify a bunch
2458 * of metadata or split extents when writing the cache out, which means
2459 * we can enospc if we are heavily fragmented in addition to just normal
2460 * out of space conditions. So if we hit this just skip setting up any
2461 * other block groups for this transaction, maybe we'll unpin enough
2462 * space the next time around.
2463 */
2464 if (!ret)
2465 dcs = BTRFS_DC_SETUP;
2466 else if (ret == -ENOSPC)
2467 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2468
2469out_put:
2470 iput(inode);
2471out_free:
2472 btrfs_release_path(path);
2473out:
2474 spin_lock(&block_group->lock);
2475 if (!ret && dcs == BTRFS_DC_SETUP)
2476 block_group->cache_generation = trans->transid;
2477 block_group->disk_cache_state = dcs;
2478 spin_unlock(&block_group->lock);
2479
2480 extent_changeset_free(data_reserved);
2481 return ret;
2482}
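/*
 * Editor's note -- minimal sketch, not part of the original file, of the
 * space cache pre-allocation sizing in cache_save_setup(): 16 pages per
 * 256M of block group length, with a floor of 16 pages. For a 1G block
 * group and 4K pages that is 4 * 16 * 4096 = 256K reserved up front.
 * The helper name is hypothetical.
 */
static inline u64 sketch_cache_prealloc_bytes(u64 bg_length)
{
        u64 num_pages = div_u64(bg_length, SZ_256M);

        if (!num_pages)
                num_pages = 1;

        return num_pages * 16 * PAGE_SIZE;
}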
2483
2484int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2485{
2486 struct btrfs_fs_info *fs_info = trans->fs_info;
32da5386 2487 struct btrfs_block_group *cache, *tmp;
77745c05
JB
2488 struct btrfs_transaction *cur_trans = trans->transaction;
2489 struct btrfs_path *path;
2490
2491 if (list_empty(&cur_trans->dirty_bgs) ||
2492 !btrfs_test_opt(fs_info, SPACE_CACHE))
2493 return 0;
2494
2495 path = btrfs_alloc_path();
2496 if (!path)
2497 return -ENOMEM;
2498
2499 /* Could add new block groups, use _safe just in case */
2500 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2501 dirty_list) {
2502 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2503 cache_save_setup(cache, trans, path);
2504 }
2505
2506 btrfs_free_path(path);
2507 return 0;
2508}
2509
2510/*
2511 * Transaction commit does final block group cache writeback during a critical
2512 * section where nothing is allowed to change the FS. This is required in
2513 * order for the cache to actually match the block group, but can introduce a
2514 * lot of latency into the commit.
2515 *
2516 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2517 * There's a chance we'll have to redo some of it if the block group changes
2518 * again during the commit, but it greatly reduces the commit latency by
2519 * getting rid of the easy block groups while we're still allowing others to
2520 * join the commit.
2521 */
2522int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2523{
2524 struct btrfs_fs_info *fs_info = trans->fs_info;
32da5386 2525 struct btrfs_block_group *cache;
77745c05
JB
2526 struct btrfs_transaction *cur_trans = trans->transaction;
2527 int ret = 0;
2528 int should_put;
2529 struct btrfs_path *path = NULL;
2530 LIST_HEAD(dirty);
2531 struct list_head *io = &cur_trans->io_bgs;
2532 int num_started = 0;
2533 int loops = 0;
2534
2535 spin_lock(&cur_trans->dirty_bgs_lock);
2536 if (list_empty(&cur_trans->dirty_bgs)) {
2537 spin_unlock(&cur_trans->dirty_bgs_lock);
2538 return 0;
2539 }
2540 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2541 spin_unlock(&cur_trans->dirty_bgs_lock);
2542
2543again:
2544 /* Make sure all the block groups on our dirty list actually exist */
2545 btrfs_create_pending_block_groups(trans);
2546
2547 if (!path) {
2548 path = btrfs_alloc_path();
2549 if (!path)
2550 return -ENOMEM;
2551 }
2552
2553 /*
2554 * cache_write_mutex is here only to save us from balance or automatic
2555 * removal of empty block groups deleting this block group while we are
2556 * writing out the cache
2557 */
2558 mutex_lock(&trans->transaction->cache_write_mutex);
2559 while (!list_empty(&dirty)) {
2560 bool drop_reserve = true;
2561
32da5386 2562 cache = list_first_entry(&dirty, struct btrfs_block_group,
77745c05
JB
2563 dirty_list);
2564 /*
2565 * This can happen if something re-dirties a block group that
2566 * is already under IO. Just wait for it to finish and then do
2567 * it all again
2568 */
2569 if (!list_empty(&cache->io_list)) {
2570 list_del_init(&cache->io_list);
2571 btrfs_wait_cache_io(trans, cache, path);
2572 btrfs_put_block_group(cache);
2573 }
2574
2575
2576 /*
2577 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2578 * it should update the cache_state. Don't delete until after
2579 * we wait.
2580 *
2581 * Since we're not running in the commit critical section
2582 * we need the dirty_bgs_lock to protect from update_block_group
2583 */
2584 spin_lock(&cur_trans->dirty_bgs_lock);
2585 list_del_init(&cache->dirty_list);
2586 spin_unlock(&cur_trans->dirty_bgs_lock);
2587
2588 should_put = 1;
2589
2590 cache_save_setup(cache, trans, path);
2591
2592 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2593 cache->io_ctl.inode = NULL;
2594 ret = btrfs_write_out_cache(trans, cache, path);
2595 if (ret == 0 && cache->io_ctl.inode) {
2596 num_started++;
2597 should_put = 0;
2598
2599 /*
2600 * The cache_write_mutex is protecting the
2601 * io_list, also refer to the definition of
2602 * btrfs_transaction::io_bgs for more details
2603 */
2604 list_add_tail(&cache->io_list, io);
2605 } else {
2606 /*
2607 * If we failed to write the cache, the
2608 * generation will be bad and life goes on
2609 */
2610 ret = 0;
2611 }
2612 }
2613 if (!ret) {
2614 ret = write_one_cache_group(trans, path, cache);
2615 /*
2616 * Our block group might still be attached to the list
2617 * of new block groups in the transaction handle of some
2618 * other task (struct btrfs_trans_handle->new_bgs). This
2619 * means its block group item isn't yet in the extent
2620 * tree. If this happens ignore the error, as we will
2621 * try again later in the critical section of the
2622 * transaction commit.
2623 */
2624 if (ret == -ENOENT) {
2625 ret = 0;
2626 spin_lock(&cur_trans->dirty_bgs_lock);
2627 if (list_empty(&cache->dirty_list)) {
2628 list_add_tail(&cache->dirty_list,
2629 &cur_trans->dirty_bgs);
2630 btrfs_get_block_group(cache);
2631 drop_reserve = false;
2632 }
2633 spin_unlock(&cur_trans->dirty_bgs_lock);
2634 } else if (ret) {
2635 btrfs_abort_transaction(trans, ret);
2636 }
2637 }
2638
2639 /* If it's not on the io list, we need to put the block group */
2640 if (should_put)
2641 btrfs_put_block_group(cache);
2642 if (drop_reserve)
2643 btrfs_delayed_refs_rsv_release(fs_info, 1);
2644
2645 if (ret)
2646 break;
2647
2648 /*
2649 * Avoid blocking other tasks for too long. It might even save
2650 * us from writing caches for block groups that are going to be
2651 * removed.
2652 */
2653 mutex_unlock(&trans->transaction->cache_write_mutex);
2654 mutex_lock(&trans->transaction->cache_write_mutex);
2655 }
2656 mutex_unlock(&trans->transaction->cache_write_mutex);
2657
2658 /*
2659 * Go through delayed refs for all the stuff we've just kicked off
2660 * and then loop back (just once)
2661 */
2662 ret = btrfs_run_delayed_refs(trans, 0);
2663 if (!ret && loops == 0) {
2664 loops++;
2665 spin_lock(&cur_trans->dirty_bgs_lock);
2666 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2667 /*
2668 * dirty_bgs_lock protects us from concurrent block group
2669 * deletes too (not just cache_write_mutex).
2670 */
2671 if (!list_empty(&dirty)) {
2672 spin_unlock(&cur_trans->dirty_bgs_lock);
2673 goto again;
2674 }
2675 spin_unlock(&cur_trans->dirty_bgs_lock);
2676 } else if (ret < 0) {
2677 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2678 }
2679
2680 btrfs_free_path(path);
2681 return ret;
2682}
2683
2684int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2685{
2686 struct btrfs_fs_info *fs_info = trans->fs_info;
32da5386 2687 struct btrfs_block_group *cache;
77745c05
JB
2688 struct btrfs_transaction *cur_trans = trans->transaction;
2689 int ret = 0;
2690 int should_put;
2691 struct btrfs_path *path;
2692 struct list_head *io = &cur_trans->io_bgs;
2693 int num_started = 0;
2694
2695 path = btrfs_alloc_path();
2696 if (!path)
2697 return -ENOMEM;
2698
2699 /*
2700 * Even though we are in the critical section of the transaction commit,
2701 * we can still have concurrent tasks adding elements to this
2702 * transaction's list of dirty block groups. These tasks correspond to
2703 * endio free space workers started when writeback finishes for a
2704 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2705 * allocate new block groups as a result of COWing nodes of the root
2706 * tree when updating the free space inode. The writeback for the space
2707 * caches is triggered by an earlier call to
2708 * btrfs_start_dirty_block_groups() and iterations of the following
2709 * loop.
2710 * Also we want to do the cache_save_setup first and then run the
2711 * delayed refs to make sure we have the best chance at doing this all
2712 * in one shot.
2713 */
2714 spin_lock(&cur_trans->dirty_bgs_lock);
2715 while (!list_empty(&cur_trans->dirty_bgs)) {
2716 cache = list_first_entry(&cur_trans->dirty_bgs,
32da5386 2717 struct btrfs_block_group,
77745c05
JB
2718 dirty_list);
2719
2720 /*
2721 * This can happen if cache_save_setup re-dirties a block group
2722 * that is already under IO. Just wait for it to finish and
2723 * then do it all again
2724 */
2725 if (!list_empty(&cache->io_list)) {
2726 spin_unlock(&cur_trans->dirty_bgs_lock);
2727 list_del_init(&cache->io_list);
2728 btrfs_wait_cache_io(trans, cache, path);
2729 btrfs_put_block_group(cache);
2730 spin_lock(&cur_trans->dirty_bgs_lock);
2731 }
2732
2733 /*
2734 * Don't remove from the dirty list until after we've waited on
2735 * any pending IO
2736 */
2737 list_del_init(&cache->dirty_list);
2738 spin_unlock(&cur_trans->dirty_bgs_lock);
2739 should_put = 1;
2740
2741 cache_save_setup(cache, trans, path);
2742
2743 if (!ret)
2744 ret = btrfs_run_delayed_refs(trans,
2745 (unsigned long) -1);
2746
2747 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2748 cache->io_ctl.inode = NULL;
2749 ret = btrfs_write_out_cache(trans, cache, path);
2750 if (ret == 0 && cache->io_ctl.inode) {
2751 num_started++;
2752 should_put = 0;
2753 list_add_tail(&cache->io_list, io);
2754 } else {
2755 /*
2756 * If we failed to write the cache, the
2757 * generation will be bad and life goes on
2758 */
2759 ret = 0;
2760 }
2761 }
2762 if (!ret) {
2763 ret = write_one_cache_group(trans, path, cache);
2764 /*
2765 * One of the free space endio workers might have
2766 * created a new block group while updating a free space
2767 * cache's inode (at inode.c:btrfs_finish_ordered_io())
2768 * and hasn't released its transaction handle yet, in
2769 * which case the new block group is still attached to
2770 * its transaction handle and its creation has not
2771 * finished yet (no block group item in the extent tree
2772 * yet, etc). If this is the case, wait for all free
2773 * space endio workers to finish and retry. This is a
2774 * very rare case, so no need for a more efficient and
2775 * complex approach.
2776 */
2777 if (ret == -ENOENT) {
2778 wait_event(cur_trans->writer_wait,
2779 atomic_read(&cur_trans->num_writers) == 1);
2780 ret = write_one_cache_group(trans, path, cache);
2781 }
2782 if (ret)
2783 btrfs_abort_transaction(trans, ret);
2784 }
2785
2786 /* If it's not on the io list, we need to put the block group */
2787 if (should_put)
2788 btrfs_put_block_group(cache);
2789 btrfs_delayed_refs_rsv_release(fs_info, 1);
2790 spin_lock(&cur_trans->dirty_bgs_lock);
2791 }
2792 spin_unlock(&cur_trans->dirty_bgs_lock);
2793
2794 /*
2795 * Refer to the definition of the io_bgs member for details on why it's safe
2796 * to use it without any locking
2797 */
2798 while (!list_empty(io)) {
32da5386 2799 cache = list_first_entry(io, struct btrfs_block_group,
77745c05
JB
2800 io_list);
2801 list_del_init(&cache->io_list);
2802 btrfs_wait_cache_io(trans, cache, path);
2803 btrfs_put_block_group(cache);
2804 }
2805
2806 btrfs_free_path(path);
2807 return ret;
2808}
606d1bf1
JB
2809
2810int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2811 u64 bytenr, u64 num_bytes, int alloc)
2812{
2813 struct btrfs_fs_info *info = trans->fs_info;
32da5386 2814 struct btrfs_block_group *cache = NULL;
606d1bf1
JB
2815 u64 total = num_bytes;
2816 u64 old_val;
2817 u64 byte_in_group;
2818 int factor;
2819 int ret = 0;
2820
2821 /* Block accounting for super block */
2822 spin_lock(&info->delalloc_root_lock);
2823 old_val = btrfs_super_bytes_used(info->super_copy);
2824 if (alloc)
2825 old_val += num_bytes;
2826 else
2827 old_val -= num_bytes;
2828 btrfs_set_super_bytes_used(info->super_copy, old_val);
2829 spin_unlock(&info->delalloc_root_lock);
2830
2831 while (total) {
2832 cache = btrfs_lookup_block_group(info, bytenr);
2833 if (!cache) {
2834 ret = -ENOENT;
2835 break;
2836 }
2837 factor = btrfs_bg_type_to_factor(cache->flags);
2838
2839 /*
2840 * If this block group has free space cache written out, we
2841 * need to make sure to load it if we are removing space. This
2842 * is because we need the unpinning stage to actually add the
2843 * space back to the block group, otherwise we will leak space.
2844 */
32da5386 2845 if (!alloc && !btrfs_block_group_done(cache))
606d1bf1
JB
2846 btrfs_cache_block_group(cache, 1);
2847
b3470b5d
DS
2848 byte_in_group = bytenr - cache->start;
2849 WARN_ON(byte_in_group > cache->length);
606d1bf1
JB
2850
2851 spin_lock(&cache->space_info->lock);
2852 spin_lock(&cache->lock);
2853
2854 if (btrfs_test_opt(info, SPACE_CACHE) &&
2855 cache->disk_cache_state < BTRFS_DC_CLEAR)
2856 cache->disk_cache_state = BTRFS_DC_CLEAR;
2857
bf38be65 2858 old_val = cache->used;
b3470b5d 2859 num_bytes = min(total, cache->length - byte_in_group);
606d1bf1
JB
2860 if (alloc) {
2861 old_val += num_bytes;
bf38be65 2862 cache->used = old_val;
606d1bf1
JB
2863 cache->reserved -= num_bytes;
2864 cache->space_info->bytes_reserved -= num_bytes;
2865 cache->space_info->bytes_used += num_bytes;
2866 cache->space_info->disk_used += num_bytes * factor;
2867 spin_unlock(&cache->lock);
2868 spin_unlock(&cache->space_info->lock);
2869 } else {
2870 old_val -= num_bytes;
bf38be65 2871 cache->used = old_val;
606d1bf1
JB
2872 cache->pinned += num_bytes;
2873 btrfs_space_info_update_bytes_pinned(info,
2874 cache->space_info, num_bytes);
2875 cache->space_info->bytes_used -= num_bytes;
2876 cache->space_info->disk_used -= num_bytes * factor;
2877 spin_unlock(&cache->lock);
2878 spin_unlock(&cache->space_info->lock);
2879
606d1bf1
JB
2880 percpu_counter_add_batch(
2881 &cache->space_info->total_bytes_pinned,
2882 num_bytes,
2883 BTRFS_TOTAL_BYTES_PINNED_BATCH);
2884 set_extent_dirty(info->pinned_extents,
2885 bytenr, bytenr + num_bytes - 1,
2886 GFP_NOFS | __GFP_NOFAIL);
2887 }
2888
2889 spin_lock(&trans->transaction->dirty_bgs_lock);
2890 if (list_empty(&cache->dirty_list)) {
2891 list_add_tail(&cache->dirty_list,
2892 &trans->transaction->dirty_bgs);
2893 trans->delayed_ref_updates++;
2894 btrfs_get_block_group(cache);
2895 }
2896 spin_unlock(&trans->transaction->dirty_bgs_lock);
2897
2898 /*
2899 * No longer have used bytes in this block group, queue it for
2900 * deletion. We do this after adding the block group to the
2901 * dirty list to avoid races between cleaner kthread and space
2902 * cache writeout.
2903 */
6e80d4f8
DZ
2904 if (!alloc && old_val == 0) {
2905 if (!btrfs_test_opt(info, DISCARD_ASYNC))
2906 btrfs_mark_bg_unused(cache);
2907 }
606d1bf1
JB
2908
2909 btrfs_put_block_group(cache);
2910 total -= num_bytes;
2911 bytenr += num_bytes;
2912 }
2913
2914 /* Modified block groups are accounted for in the delayed_refs_rsv. */
2915 btrfs_update_delayed_refs_rsv(trans);
2916 return ret;
2917}
2918
2919/**
2920 * btrfs_add_reserved_bytes - update the block_group and space info counters
2921 * @cache: The cache we are manipulating
2922 * @ram_bytes: The number of bytes of file content; this is the same as
2923 * @num_bytes except for the compressed path.
2924 * @num_bytes: The number of bytes in question
2925 * @delalloc: The blocks are allocated for the delalloc write
2926 *
2927 * This is called by the allocator when it reserves space. If this is a
2928 * reservation and the block group has become read only we cannot make the
2929 * reservation and return -EAGAIN, otherwise this function always succeeds.
2930 */
32da5386 2931int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
606d1bf1
JB
2932 u64 ram_bytes, u64 num_bytes, int delalloc)
2933{
2934 struct btrfs_space_info *space_info = cache->space_info;
2935 int ret = 0;
2936
2937 spin_lock(&space_info->lock);
2938 spin_lock(&cache->lock);
2939 if (cache->ro) {
2940 ret = -EAGAIN;
2941 } else {
2942 cache->reserved += num_bytes;
2943 space_info->bytes_reserved += num_bytes;
a43c3835
JB
2944 trace_btrfs_space_reservation(cache->fs_info, "space_info",
2945 space_info->flags, num_bytes, 1);
606d1bf1
JB
2946 btrfs_space_info_update_bytes_may_use(cache->fs_info,
2947 space_info, -ram_bytes);
2948 if (delalloc)
2949 cache->delalloc_bytes += num_bytes;
2950 }
2951 spin_unlock(&cache->lock);
2952 spin_unlock(&space_info->lock);
2953 return ret;
2954}
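/*
 * Editor's note -- illustrative example, not part of the original file:
 * for a compressed write where 128K of file data ends up as a 16K extent
 * on disk, btrfs_add_reserved_bytes() is called with ram_bytes = 128K and
 * num_bytes = 16K, so bytes_may_use drops by the full 128K that delalloc
 * reserved while cache->reserved and bytes_reserved only grow by the 16K
 * actually being allocated.
 */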
2955
2956/**
2957 * btrfs_free_reserved_bytes - update the block_group and space info counters
2958 * @cache: The cache we are manipulating
2959 * @num_bytes: The number of bytes in question
2960 * @delalloc: The blocks are allocated for the delalloc write
2961 *
2962 * This is called by somebody who is freeing space that was never actually used
2963 * on disk. For example, if you reserve some space for a new leaf in transaction
2964 * A and, before transaction A commits, you free that leaf, you call this with
2965 * reserve set to 0 in order to clear the reservation.
2966 */
32da5386 2967void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
606d1bf1
JB
2968 u64 num_bytes, int delalloc)
2969{
2970 struct btrfs_space_info *space_info = cache->space_info;
2971
2972 spin_lock(&space_info->lock);
2973 spin_lock(&cache->lock);
2974 if (cache->ro)
2975 space_info->bytes_readonly += num_bytes;
2976 cache->reserved -= num_bytes;
2977 space_info->bytes_reserved -= num_bytes;
2978 space_info->max_extent_size = 0;
2979
2980 if (delalloc)
2981 cache->delalloc_bytes -= num_bytes;
2982 spin_unlock(&cache->lock);
2983 spin_unlock(&space_info->lock);
2984}
07730d87
JB
2985
2986static void force_metadata_allocation(struct btrfs_fs_info *info)
2987{
2988 struct list_head *head = &info->space_info;
2989 struct btrfs_space_info *found;
2990
2991 rcu_read_lock();
2992 list_for_each_entry_rcu(found, head, list) {
2993 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2994 found->force_alloc = CHUNK_ALLOC_FORCE;
2995 }
2996 rcu_read_unlock();
2997}
2998
2999static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
3000 struct btrfs_space_info *sinfo, int force)
3001{
3002 u64 bytes_used = btrfs_space_info_used(sinfo, false);
3003 u64 thresh;
3004
3005 if (force == CHUNK_ALLOC_FORCE)
3006 return 1;
3007
3008 /*
3009 * in limited mode, we want to have some free space up to
3010 * about 1% of the FS size.
3011 */
3012 if (force == CHUNK_ALLOC_LIMITED) {
3013 thresh = btrfs_super_total_bytes(fs_info->super_copy);
3014 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
3015
3016 if (sinfo->total_bytes - bytes_used < thresh)
3017 return 1;
3018 }
3019
3020 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
3021 return 0;
3022 return 1;
3023}
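/*
 * Editor's note -- minimal sketch, not part of the original file, of the
 * policy implemented by should_alloc_chunk() above (ignoring the
 * CHUNK_ALLOC_FORCE case): in limited mode keep about 1% of the FS (at
 * least 64M) available in this space_info, otherwise only allocate once
 * the space_info is roughly 80% used. The helper name is hypothetical.
 */
static inline bool sketch_should_alloc(u64 fs_total_bytes, u64 sinfo_total,
                                       u64 bytes_used, bool limited)
{
        if (limited) {
                u64 thresh = max_t(u64, SZ_64M, div_u64(fs_total_bytes, 100));

                if (sinfo_total - bytes_used < thresh)
                        return true;
        }
        return bytes_used + SZ_2M >= div_u64(sinfo_total * 8, 10);
}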
3024
3025int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3026{
3027 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3028
3029 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3030}
3031
3032/*
3033 * If force is CHUNK_ALLOC_FORCE:
3034 * - return 1 if it successfully allocates a chunk,
3035 * - return errors including -ENOSPC otherwise.
3036 * If force is NOT CHUNK_ALLOC_FORCE:
3037 * - return 0 if it doesn't need to allocate a new chunk,
3038 * - return 1 if it successfully allocates a chunk,
3039 * - return errors including -ENOSPC otherwise.
3040 */
3041int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
3042 enum btrfs_chunk_alloc_enum force)
3043{
3044 struct btrfs_fs_info *fs_info = trans->fs_info;
3045 struct btrfs_space_info *space_info;
3046 bool wait_for_alloc = false;
3047 bool should_alloc = false;
3048 int ret = 0;
3049
3050 /* Don't re-enter if we're already allocating a chunk */
3051 if (trans->allocating_chunk)
3052 return -ENOSPC;
3053
3054 space_info = btrfs_find_space_info(fs_info, flags);
3055 ASSERT(space_info);
3056
3057 do {
3058 spin_lock(&space_info->lock);
3059 if (force < space_info->force_alloc)
3060 force = space_info->force_alloc;
3061 should_alloc = should_alloc_chunk(fs_info, space_info, force);
3062 if (space_info->full) {
3063 /* No more free physical space */
3064 if (should_alloc)
3065 ret = -ENOSPC;
3066 else
3067 ret = 0;
3068 spin_unlock(&space_info->lock);
3069 return ret;
3070 } else if (!should_alloc) {
3071 spin_unlock(&space_info->lock);
3072 return 0;
3073 } else if (space_info->chunk_alloc) {
3074 /*
3075 * Someone is already allocating, so we need to block
3076 * until this someone is finished and then loop to
3077 * recheck if we should continue with our allocation
3078 * attempt.
3079 */
3080 wait_for_alloc = true;
3081 spin_unlock(&space_info->lock);
3082 mutex_lock(&fs_info->chunk_mutex);
3083 mutex_unlock(&fs_info->chunk_mutex);
3084 } else {
3085 /* Proceed with allocation */
3086 space_info->chunk_alloc = 1;
3087 wait_for_alloc = false;
3088 spin_unlock(&space_info->lock);
3089 }
3090
3091 cond_resched();
3092 } while (wait_for_alloc);
3093
3094 mutex_lock(&fs_info->chunk_mutex);
3095 trans->allocating_chunk = true;
3096
3097 /*
3098 * If we have mixed data/metadata chunks we want to make sure we keep
3099 * allocating mixed chunks instead of individual chunks.
3100 */
3101 if (btrfs_mixed_space_info(space_info))
3102 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3103
3104 /*
3105 * if we're doing a data chunk, go ahead and make sure that
3106 * we keep a reasonable number of metadata chunks allocated in the
3107 * FS as well.
3108 */
3109 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3110 fs_info->data_chunk_allocations++;
3111 if (!(fs_info->data_chunk_allocations %
3112 fs_info->metadata_ratio))
3113 force_metadata_allocation(fs_info);
3114 }
3115
3116 /*
3117 * Check if we have enough space in SYSTEM chunk because we may need
3118 * to update devices.
3119 */
3120 check_system_chunk(trans, flags);
3121
3122 ret = btrfs_alloc_chunk(trans, flags);
3123 trans->allocating_chunk = false;
3124
3125 spin_lock(&space_info->lock);
3126 if (ret < 0) {
3127 if (ret == -ENOSPC)
3128 space_info->full = 1;
3129 else
3130 goto out;
3131 } else {
3132 ret = 1;
3133 space_info->max_extent_size = 0;
3134 }
3135
3136 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3137out:
3138 space_info->chunk_alloc = 0;
3139 spin_unlock(&space_info->lock);
3140 mutex_unlock(&fs_info->chunk_mutex);
3141 /*
3142 * When we allocate a new chunk we reserve space in the chunk block
3143 * reserve to make sure we can COW nodes/leafs in the chunk tree or
3144 * add new nodes/leafs to it if we end up needing to do it when
3145 * inserting the chunk item and updating device items as part of the
3146 * second phase of chunk allocation, performed by
3147 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
3148 * large number of new block groups to create in our transaction
3149 * handle's new_bgs list to avoid exhausting the chunk block reserve
3150 * in extreme cases - like having a single transaction create many new
3151 * block groups when starting to write out the free space caches of all
3152 * the block groups that were made dirty during the lifetime of the
3153 * transaction.
3154 */
3155 if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
3156 btrfs_create_pending_block_groups(trans);
3157
3158 return ret;
3159}
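/*
 * Editor's note -- minimal caller sketch, not part of the original file,
 * following the return convention documented above btrfs_chunk_alloc():
 * when forcing an allocation for a profile conversion, -ENOSPC is often
 * tolerable because enough space may already exist at the requested
 * profile (see btrfs_inc_block_group_ro() earlier in this file). The
 * helper name is hypothetical.
 */
static inline int sketch_force_chunk_alloc(struct btrfs_trans_handle *trans,
                                           u64 flags)
{
        int ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);

        if (ret == -ENOSPC)
                ret = 0;
        return ret < 0 ? ret : 0;
}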
3160
3161static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
3162{
3163 u64 num_dev;
3164
3165 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
3166 if (!num_dev)
3167 num_dev = fs_info->fs_devices->rw_devices;
3168
3169 return num_dev;
3170}
3171
3172/*
a9143bd3 3173 * Reserve space in the system space_info for allocating or removing a chunk
07730d87
JB
3174 */
3175void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3176{
3177 struct btrfs_fs_info *fs_info = trans->fs_info;
3178 struct btrfs_space_info *info;
3179 u64 left;
3180 u64 thresh;
3181 int ret = 0;
3182 u64 num_devs;
3183
3184 /*
3185 * Needed because we can end up allocating a system chunk, and we need an
3186 * atomic and race-free space reservation in the chunk block reserve.
3187 */
3188 lockdep_assert_held(&fs_info->chunk_mutex);
3189
3190 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3191 spin_lock(&info->lock);
3192 left = info->total_bytes - btrfs_space_info_used(info, true);
3193 spin_unlock(&info->lock);
3194
3195 num_devs = get_profile_num_devs(fs_info, type);
3196
3197 /* num_devs device items to update and 1 chunk item to add or remove */
2bd36e7b
JB
3198 thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3199 btrfs_calc_insert_metadata_size(fs_info, 1);
07730d87
JB
3200
3201 if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3202 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3203 left, thresh, type);
3204 btrfs_dump_space_info(fs_info, info, 0, 0);
3205 }
3206
3207 if (left < thresh) {
3208 u64 flags = btrfs_system_alloc_profile(fs_info);
3209
3210 /*
3211 * Ignore failure to create system chunk. We might end up not
3212 * needing it, as we might not need to COW all nodes/leafs from
3213 * the paths we visit in the chunk tree (they were already COWed
3214 * or created in the current transaction for example).
3215 */
3216 ret = btrfs_alloc_chunk(trans, flags);
3217 }
3218
3219 if (!ret) {
3220 ret = btrfs_block_rsv_add(fs_info->chunk_root,
3221 &fs_info->chunk_block_rsv,
3222 thresh, BTRFS_RESERVE_NO_FLUSH);
3223 if (!ret)
3224 trans->chunk_bytes_reserved += thresh;
3225 }
3226}
3227
3e43c279
JB
3228void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3229{
32da5386 3230 struct btrfs_block_group *block_group;
3e43c279
JB
3231 u64 last = 0;
3232
3233 while (1) {
3234 struct inode *inode;
3235
3236 block_group = btrfs_lookup_first_block_group(info, last);
3237 while (block_group) {
3238 btrfs_wait_block_group_cache_done(block_group);
3239 spin_lock(&block_group->lock);
3240 if (block_group->iref)
3241 break;
3242 spin_unlock(&block_group->lock);
3243 block_group = btrfs_next_block_group(block_group);
3244 }
3245 if (!block_group) {
3246 if (last == 0)
3247 break;
3248 last = 0;
3249 continue;
3250 }
3251
3252 inode = block_group->inode;
3253 block_group->iref = 0;
3254 block_group->inode = NULL;
3255 spin_unlock(&block_group->lock);
3256 ASSERT(block_group->io_ctl.inode == NULL);
3257 iput(inode);
b3470b5d 3258 last = block_group->start + block_group->length;
3e43c279
JB
3259 btrfs_put_block_group(block_group);
3260 }
3261}
3262
3263/*
3264 * Must be called only after stopping all workers, since we could have block
3265 * group caching kthreads running, and therefore they could race with us if we
3266 * freed the block groups before stopping them.
3267 */
3268int btrfs_free_block_groups(struct btrfs_fs_info *info)
3269{
32da5386 3270 struct btrfs_block_group *block_group;
3e43c279
JB
3271 struct btrfs_space_info *space_info;
3272 struct btrfs_caching_control *caching_ctl;
3273 struct rb_node *n;
3274
3275 down_write(&info->commit_root_sem);
3276 while (!list_empty(&info->caching_block_groups)) {
3277 caching_ctl = list_entry(info->caching_block_groups.next,
3278 struct btrfs_caching_control, list);
3279 list_del(&caching_ctl->list);
3280 btrfs_put_caching_control(caching_ctl);
3281 }
3282 up_write(&info->commit_root_sem);
3283
3284 spin_lock(&info->unused_bgs_lock);
3285 while (!list_empty(&info->unused_bgs)) {
3286 block_group = list_first_entry(&info->unused_bgs,
32da5386 3287 struct btrfs_block_group,
3e43c279
JB
3288 bg_list);
3289 list_del_init(&block_group->bg_list);
3290 btrfs_put_block_group(block_group);
3291 }
3292 spin_unlock(&info->unused_bgs_lock);
3293
3294 spin_lock(&info->block_group_cache_lock);
3295 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
32da5386 3296 block_group = rb_entry(n, struct btrfs_block_group,
3e43c279
JB
3297 cache_node);
3298 rb_erase(&block_group->cache_node,
3299 &info->block_group_cache_tree);
3300 RB_CLEAR_NODE(&block_group->cache_node);
3301 spin_unlock(&info->block_group_cache_lock);
3302
3303 down_write(&block_group->space_info->groups_sem);
3304 list_del(&block_group->list);
3305 up_write(&block_group->space_info->groups_sem);
3306
3307 /*
3308 * We haven't cached this block group, which means we could
3309 * possibly have excluded extents on this block group.
3310 */
3311 if (block_group->cached == BTRFS_CACHE_NO ||
3312 block_group->cached == BTRFS_CACHE_ERROR)
3313 btrfs_free_excluded_extents(block_group);
3314
3315 btrfs_remove_free_space_cache(block_group);
3316 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3317 ASSERT(list_empty(&block_group->dirty_list));
3318 ASSERT(list_empty(&block_group->io_list));
3319 ASSERT(list_empty(&block_group->bg_list));
3320 ASSERT(atomic_read(&block_group->count) == 1);
3321 btrfs_put_block_group(block_group);
3322
3323 spin_lock(&info->block_group_cache_lock);
3324 }
3325 spin_unlock(&info->block_group_cache_lock);
3326
3327 /*
3328 * Now that all the block groups are freed, go through and free all the
3329 * space_info structs. This is only called during the final stages of
3330 * unmount, and so we know nobody is using them. We call
3331 * synchronize_rcu() once before we start, just to be on the safe side.
3332 */
3333 synchronize_rcu();
3334
3335 btrfs_release_global_block_rsv(info);
3336
3337 while (!list_empty(&info->space_info)) {
3338 space_info = list_entry(info->space_info.next,
3339 struct btrfs_space_info,
3340 list);
3341
3342 /*
3343 * Do not hide this behind enospc_debug, this is actually
3344 * important and indicates a real bug if this happens.
3345 */
3346 if (WARN_ON(space_info->bytes_pinned > 0 ||
3347 space_info->bytes_reserved > 0 ||
3348 space_info->bytes_may_use > 0))
3349 btrfs_dump_space_info(info, space_info, 0, 0);
3350 list_del(&space_info->list);
3351 btrfs_sysfs_remove_space_info(space_info);
3352 }
3353 return 0;
3354}