]> git.ipfire.org Git - people/arne_f/kernel.git/blame - fs/btrfs/extent-tree.c
Btrfs: protect orphan block rsv with spin_lock
[people/arne_f/kernel.git] / fs / btrfs / extent-tree.c
CommitLineData
6cbd5570
CM
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
ec6b910f 18#include <linux/sched.h>
edbd8d4e 19#include <linux/pagemap.h>
ec44a35c 20#include <linux/writeback.h>
21af804c 21#include <linux/blkdev.h>
b7a9f29f 22#include <linux/sort.h>
4184ea7f 23#include <linux/rcupdate.h>
817d52f8 24#include <linux/kthread.h>
5a0e3ad6 25#include <linux/slab.h>
dff51cd1 26#include <linux/ratelimit.h>
4b4e25f2 27#include "compat.h"
74493f7a 28#include "hash.h"
fec577fb
CM
29#include "ctree.h"
30#include "disk-io.h"
31#include "print-tree.h"
e089f05c 32#include "transaction.h"
0b86a832 33#include "volumes.h"
925baedd 34#include "locking.h"
fa9c0d79 35#include "free-space-cache.h"
fec577fb 36
0e4f8f88
CM
37/* control flags for do_chunk_alloc's force field
38 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
39 * if we really need one.
40 *
41 * CHUNK_ALLOC_FORCE means it must try to allocate one
42 *
43 * CHUNK_ALLOC_LIMITED means to only try and allocate one
44 * if we have very few chunks already allocated. This is
45 * used as part of the clustering code to help make sure
46 * we have a good pool of storage to cluster in, without
47 * filling the FS with empty chunks
48 *
49 */
50enum {
51 CHUNK_ALLOC_NO_FORCE = 0,
52 CHUNK_ALLOC_FORCE = 1,
53 CHUNK_ALLOC_LIMITED = 2,
54};
55
fb25e914
JB
56/*
57 * Control how reservations are dealt with.
58 *
59 * RESERVE_FREE - freeing a reservation.
60 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
61 * ENOSPC accounting
62 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
63 * bytes_may_use as the ENOSPC accounting is done elsewhere
64 */
65enum {
66 RESERVE_FREE = 0,
67 RESERVE_ALLOC = 1,
68 RESERVE_ALLOC_NO_ACCOUNT = 2,
69};
70
f3465ca4
JB
71static int update_block_group(struct btrfs_trans_handle *trans,
72 struct btrfs_root *root,
f0486c68 73 u64 bytenr, u64 num_bytes, int alloc);
5d4f98a2
YZ
74static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
75 struct btrfs_root *root,
76 u64 bytenr, u64 num_bytes, u64 parent,
77 u64 root_objectid, u64 owner_objectid,
78 u64 owner_offset, int refs_to_drop,
79 struct btrfs_delayed_extent_op *extra_op);
80static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
81 struct extent_buffer *leaf,
82 struct btrfs_extent_item *ei);
83static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
84 struct btrfs_root *root,
85 u64 parent, u64 root_objectid,
86 u64 flags, u64 owner, u64 offset,
87 struct btrfs_key *ins, int ref_mod);
88static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
89 struct btrfs_root *root,
90 u64 parent, u64 root_objectid,
91 u64 flags, struct btrfs_disk_key *key,
92 int level, struct btrfs_key *ins);
6a63209f
JB
93static int do_chunk_alloc(struct btrfs_trans_handle *trans,
94 struct btrfs_root *extent_root, u64 alloc_bytes,
95 u64 flags, int force);
11833d66
YZ
96static int find_next_key(struct btrfs_path *path, int level,
97 struct btrfs_key *key);
9ed74f2d
JB
98static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
99 int dump_block_groups);
fb25e914
JB
100static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
101 u64 num_bytes, int reserve);
6a63209f 102
817d52f8
JB
103static noinline int
104block_group_cache_done(struct btrfs_block_group_cache *cache)
105{
106 smp_mb();
107 return cache->cached == BTRFS_CACHE_FINISHED;
108}
109
0f9dd46c
JB
110static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
111{
112 return (cache->flags & bits) == bits;
113}
114
62a45b60 115static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
11dfe35a
JB
116{
117 atomic_inc(&cache->count);
118}
119
120void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
121{
f0486c68
YZ
122 if (atomic_dec_and_test(&cache->count)) {
123 WARN_ON(cache->pinned > 0);
124 WARN_ON(cache->reserved > 0);
34d52cb6 125 kfree(cache->free_space_ctl);
11dfe35a 126 kfree(cache);
f0486c68 127 }
11dfe35a
JB
128}
129
0f9dd46c
JB
130/*
131 * this adds the block group to the fs_info rb tree for the block group
132 * cache
133 */
b2950863 134static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
0f9dd46c
JB
135 struct btrfs_block_group_cache *block_group)
136{
137 struct rb_node **p;
138 struct rb_node *parent = NULL;
139 struct btrfs_block_group_cache *cache;
140
141 spin_lock(&info->block_group_cache_lock);
142 p = &info->block_group_cache_tree.rb_node;
143
144 while (*p) {
145 parent = *p;
146 cache = rb_entry(parent, struct btrfs_block_group_cache,
147 cache_node);
148 if (block_group->key.objectid < cache->key.objectid) {
149 p = &(*p)->rb_left;
150 } else if (block_group->key.objectid > cache->key.objectid) {
151 p = &(*p)->rb_right;
152 } else {
153 spin_unlock(&info->block_group_cache_lock);
154 return -EEXIST;
155 }
156 }
157
158 rb_link_node(&block_group->cache_node, parent, p);
159 rb_insert_color(&block_group->cache_node,
160 &info->block_group_cache_tree);
161 spin_unlock(&info->block_group_cache_lock);
162
163 return 0;
164}
165
166/*
167 * This will return the block group at or after bytenr if contains is 0, else
168 * it will return the block group that contains the bytenr
169 */
170static struct btrfs_block_group_cache *
171block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
172 int contains)
173{
174 struct btrfs_block_group_cache *cache, *ret = NULL;
175 struct rb_node *n;
176 u64 end, start;
177
178 spin_lock(&info->block_group_cache_lock);
179 n = info->block_group_cache_tree.rb_node;
180
181 while (n) {
182 cache = rb_entry(n, struct btrfs_block_group_cache,
183 cache_node);
184 end = cache->key.objectid + cache->key.offset - 1;
185 start = cache->key.objectid;
186
187 if (bytenr < start) {
188 if (!contains && (!ret || start < ret->key.objectid))
189 ret = cache;
190 n = n->rb_left;
191 } else if (bytenr > start) {
192 if (contains && bytenr <= end) {
193 ret = cache;
194 break;
195 }
196 n = n->rb_right;
197 } else {
198 ret = cache;
199 break;
200 }
201 }
d2fb3437 202 if (ret)
11dfe35a 203 btrfs_get_block_group(ret);
0f9dd46c
JB
204 spin_unlock(&info->block_group_cache_lock);
205
206 return ret;
207}
208
11833d66
YZ
209static int add_excluded_extent(struct btrfs_root *root,
210 u64 start, u64 num_bytes)
817d52f8 211{
11833d66
YZ
212 u64 end = start + num_bytes - 1;
213 set_extent_bits(&root->fs_info->freed_extents[0],
214 start, end, EXTENT_UPTODATE, GFP_NOFS);
215 set_extent_bits(&root->fs_info->freed_extents[1],
216 start, end, EXTENT_UPTODATE, GFP_NOFS);
217 return 0;
218}
817d52f8 219
11833d66
YZ
220static void free_excluded_extents(struct btrfs_root *root,
221 struct btrfs_block_group_cache *cache)
222{
223 u64 start, end;
817d52f8 224
11833d66
YZ
225 start = cache->key.objectid;
226 end = start + cache->key.offset - 1;
227
228 clear_extent_bits(&root->fs_info->freed_extents[0],
229 start, end, EXTENT_UPTODATE, GFP_NOFS);
230 clear_extent_bits(&root->fs_info->freed_extents[1],
231 start, end, EXTENT_UPTODATE, GFP_NOFS);
817d52f8
JB
232}
233
11833d66
YZ
234static int exclude_super_stripes(struct btrfs_root *root,
235 struct btrfs_block_group_cache *cache)
817d52f8 236{
817d52f8
JB
237 u64 bytenr;
238 u64 *logical;
239 int stripe_len;
240 int i, nr, ret;
241
06b2331f
YZ
242 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
243 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
244 cache->bytes_super += stripe_len;
245 ret = add_excluded_extent(root, cache->key.objectid,
246 stripe_len);
247 BUG_ON(ret);
248 }
249
817d52f8
JB
250 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
251 bytenr = btrfs_sb_offset(i);
252 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
253 cache->key.objectid, bytenr,
254 0, &logical, &nr, &stripe_len);
255 BUG_ON(ret);
11833d66 256
817d52f8 257 while (nr--) {
1b2da372 258 cache->bytes_super += stripe_len;
11833d66
YZ
259 ret = add_excluded_extent(root, logical[nr],
260 stripe_len);
261 BUG_ON(ret);
817d52f8 262 }
11833d66 263
817d52f8
JB
264 kfree(logical);
265 }
817d52f8
JB
266 return 0;
267}
268
11833d66
YZ
269static struct btrfs_caching_control *
270get_caching_control(struct btrfs_block_group_cache *cache)
271{
272 struct btrfs_caching_control *ctl;
273
274 spin_lock(&cache->lock);
275 if (cache->cached != BTRFS_CACHE_STARTED) {
276 spin_unlock(&cache->lock);
277 return NULL;
278 }
279
dde5abee
JB
280 /* We're loading it the fast way, so we don't have a caching_ctl. */
281 if (!cache->caching_ctl) {
282 spin_unlock(&cache->lock);
11833d66
YZ
283 return NULL;
284 }
285
286 ctl = cache->caching_ctl;
287 atomic_inc(&ctl->count);
288 spin_unlock(&cache->lock);
289 return ctl;
290}
291
292static void put_caching_control(struct btrfs_caching_control *ctl)
293{
294 if (atomic_dec_and_test(&ctl->count))
295 kfree(ctl);
296}
297
0f9dd46c
JB
298/*
299 * this is only called by cache_block_group, since we could have freed extents
300 * we need to check the pinned_extents for any extents that can't be used yet
301 * since their free space will be released as soon as the transaction commits.
302 */
817d52f8 303static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
0f9dd46c
JB
304 struct btrfs_fs_info *info, u64 start, u64 end)
305{
817d52f8 306 u64 extent_start, extent_end, size, total_added = 0;
0f9dd46c
JB
307 int ret;
308
309 while (start < end) {
11833d66 310 ret = find_first_extent_bit(info->pinned_extents, start,
0f9dd46c 311 &extent_start, &extent_end,
11833d66 312 EXTENT_DIRTY | EXTENT_UPTODATE);
0f9dd46c
JB
313 if (ret)
314 break;
315
06b2331f 316 if (extent_start <= start) {
0f9dd46c
JB
317 start = extent_end + 1;
318 } else if (extent_start > start && extent_start < end) {
319 size = extent_start - start;
817d52f8 320 total_added += size;
ea6a478e
JB
321 ret = btrfs_add_free_space(block_group, start,
322 size);
0f9dd46c
JB
323 BUG_ON(ret);
324 start = extent_end + 1;
325 } else {
326 break;
327 }
328 }
329
330 if (start < end) {
331 size = end - start;
817d52f8 332 total_added += size;
ea6a478e 333 ret = btrfs_add_free_space(block_group, start, size);
0f9dd46c
JB
334 BUG_ON(ret);
335 }
336
817d52f8 337 return total_added;
0f9dd46c
JB
338}
339
bab39bf9 340static noinline void caching_thread(struct btrfs_work *work)
e37c9e69 341{
bab39bf9
JB
342 struct btrfs_block_group_cache *block_group;
343 struct btrfs_fs_info *fs_info;
344 struct btrfs_caching_control *caching_ctl;
345 struct btrfs_root *extent_root;
e37c9e69 346 struct btrfs_path *path;
5f39d397 347 struct extent_buffer *leaf;
11833d66 348 struct btrfs_key key;
817d52f8 349 u64 total_found = 0;
11833d66
YZ
350 u64 last = 0;
351 u32 nritems;
352 int ret = 0;
f510cfec 353
bab39bf9
JB
354 caching_ctl = container_of(work, struct btrfs_caching_control, work);
355 block_group = caching_ctl->block_group;
356 fs_info = block_group->fs_info;
357 extent_root = fs_info->extent_root;
358
e37c9e69
CM
359 path = btrfs_alloc_path();
360 if (!path)
bab39bf9 361 goto out;
7d7d6068 362
817d52f8 363 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
11833d66 364
5cd57b2c 365 /*
817d52f8
JB
366 * We don't want to deadlock with somebody trying to allocate a new
367 * extent for the extent root while also trying to search the extent
368 * root to add free space. So we skip locking and search the commit
369 * root, since its read-only
5cd57b2c
CM
370 */
371 path->skip_locking = 1;
817d52f8 372 path->search_commit_root = 1;
026fd317 373 path->reada = 1;
817d52f8 374
e4404d6e 375 key.objectid = last;
e37c9e69 376 key.offset = 0;
11833d66 377 key.type = BTRFS_EXTENT_ITEM_KEY;
013f1b12 378again:
11833d66 379 mutex_lock(&caching_ctl->mutex);
013f1b12
CM
380 /* need to make sure the commit_root doesn't disappear */
381 down_read(&fs_info->extent_commit_sem);
382
11833d66 383 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
e37c9e69 384 if (ret < 0)
ef8bbdfe 385 goto err;
a512bbf8 386
11833d66
YZ
387 leaf = path->nodes[0];
388 nritems = btrfs_header_nritems(leaf);
389
d397712b 390 while (1) {
7841cb28 391 if (btrfs_fs_closing(fs_info) > 1) {
f25784b3 392 last = (u64)-1;
817d52f8 393 break;
f25784b3 394 }
817d52f8 395
11833d66
YZ
396 if (path->slots[0] < nritems) {
397 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
398 } else {
399 ret = find_next_key(path, 0, &key);
400 if (ret)
e37c9e69 401 break;
817d52f8 402
589d8ade
JB
403 if (need_resched() ||
404 btrfs_next_leaf(extent_root, path)) {
405 caching_ctl->progress = last;
ff5714cc 406 btrfs_release_path(path);
589d8ade
JB
407 up_read(&fs_info->extent_commit_sem);
408 mutex_unlock(&caching_ctl->mutex);
11833d66 409 cond_resched();
589d8ade
JB
410 goto again;
411 }
412 leaf = path->nodes[0];
413 nritems = btrfs_header_nritems(leaf);
414 continue;
11833d66 415 }
817d52f8 416
11833d66
YZ
417 if (key.objectid < block_group->key.objectid) {
418 path->slots[0]++;
817d52f8 419 continue;
e37c9e69 420 }
0f9dd46c 421
e37c9e69 422 if (key.objectid >= block_group->key.objectid +
0f9dd46c 423 block_group->key.offset)
e37c9e69 424 break;
7d7d6068 425
11833d66 426 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
817d52f8
JB
427 total_found += add_new_free_space(block_group,
428 fs_info, last,
429 key.objectid);
7d7d6068 430 last = key.objectid + key.offset;
817d52f8 431
11833d66
YZ
432 if (total_found > (1024 * 1024 * 2)) {
433 total_found = 0;
434 wake_up(&caching_ctl->wait);
435 }
817d52f8 436 }
e37c9e69
CM
437 path->slots[0]++;
438 }
817d52f8 439 ret = 0;
e37c9e69 440
817d52f8
JB
441 total_found += add_new_free_space(block_group, fs_info, last,
442 block_group->key.objectid +
443 block_group->key.offset);
11833d66 444 caching_ctl->progress = (u64)-1;
817d52f8
JB
445
446 spin_lock(&block_group->lock);
11833d66 447 block_group->caching_ctl = NULL;
817d52f8
JB
448 block_group->cached = BTRFS_CACHE_FINISHED;
449 spin_unlock(&block_group->lock);
0f9dd46c 450
54aa1f4d 451err:
e37c9e69 452 btrfs_free_path(path);
276e680d 453 up_read(&fs_info->extent_commit_sem);
817d52f8 454
11833d66
YZ
455 free_excluded_extents(extent_root, block_group);
456
457 mutex_unlock(&caching_ctl->mutex);
bab39bf9 458out:
11833d66
YZ
459 wake_up(&caching_ctl->wait);
460
461 put_caching_control(caching_ctl);
11dfe35a 462 btrfs_put_block_group(block_group);
817d52f8
JB
463}
464
9d66e233
JB
465static int cache_block_group(struct btrfs_block_group_cache *cache,
466 struct btrfs_trans_handle *trans,
b8399dee 467 struct btrfs_root *root,
9d66e233 468 int load_cache_only)
817d52f8 469{
291c7d2f 470 DEFINE_WAIT(wait);
11833d66
YZ
471 struct btrfs_fs_info *fs_info = cache->fs_info;
472 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
473 int ret = 0;
474
291c7d2f
JB
475 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
476 BUG_ON(!caching_ctl);
477
478 INIT_LIST_HEAD(&caching_ctl->list);
479 mutex_init(&caching_ctl->mutex);
480 init_waitqueue_head(&caching_ctl->wait);
481 caching_ctl->block_group = cache;
482 caching_ctl->progress = cache->key.objectid;
483 atomic_set(&caching_ctl->count, 1);
484 caching_ctl->work.func = caching_thread;
485
486 spin_lock(&cache->lock);
487 /*
488 * This should be a rare occasion, but this could happen I think in the
489 * case where one thread starts to load the space cache info, and then
490 * some other thread starts a transaction commit which tries to do an
491 * allocation while the other thread is still loading the space cache
492 * info. The previous loop should have kept us from choosing this block
493 * group, but if we've moved to the state where we will wait on caching
494 * block groups we need to first check if we're doing a fast load here,
495 * so we can wait for it to finish, otherwise we could end up allocating
496 * from a block group who's cache gets evicted for one reason or
497 * another.
498 */
499 while (cache->cached == BTRFS_CACHE_FAST) {
500 struct btrfs_caching_control *ctl;
501
502 ctl = cache->caching_ctl;
503 atomic_inc(&ctl->count);
504 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
505 spin_unlock(&cache->lock);
506
507 schedule();
508
509 finish_wait(&ctl->wait, &wait);
510 put_caching_control(ctl);
511 spin_lock(&cache->lock);
512 }
513
514 if (cache->cached != BTRFS_CACHE_NO) {
515 spin_unlock(&cache->lock);
516 kfree(caching_ctl);
11833d66 517 return 0;
291c7d2f
JB
518 }
519 WARN_ON(cache->caching_ctl);
520 cache->caching_ctl = caching_ctl;
521 cache->cached = BTRFS_CACHE_FAST;
522 spin_unlock(&cache->lock);
11833d66 523
9d66e233
JB
524 /*
525 * We can't do the read from on-disk cache during a commit since we need
b8399dee
JB
526 * to have the normal tree locking. Also if we are currently trying to
527 * allocate blocks for the tree root we can't do the fast caching since
528 * we likely hold important locks.
9d66e233 529 */
f7039b1d 530 if (trans && (!trans->transaction->in_commit) &&
73bc1876
JB
531 (root && root != root->fs_info->tree_root) &&
532 btrfs_test_opt(root, SPACE_CACHE)) {
9d66e233
JB
533 ret = load_free_space_cache(fs_info, cache);
534
535 spin_lock(&cache->lock);
536 if (ret == 1) {
291c7d2f 537 cache->caching_ctl = NULL;
9d66e233
JB
538 cache->cached = BTRFS_CACHE_FINISHED;
539 cache->last_byte_to_unpin = (u64)-1;
540 } else {
291c7d2f
JB
541 if (load_cache_only) {
542 cache->caching_ctl = NULL;
543 cache->cached = BTRFS_CACHE_NO;
544 } else {
545 cache->cached = BTRFS_CACHE_STARTED;
546 }
9d66e233
JB
547 }
548 spin_unlock(&cache->lock);
291c7d2f 549 wake_up(&caching_ctl->wait);
3c14874a 550 if (ret == 1) {
291c7d2f 551 put_caching_control(caching_ctl);
3c14874a 552 free_excluded_extents(fs_info->extent_root, cache);
9d66e233 553 return 0;
3c14874a 554 }
291c7d2f
JB
555 } else {
556 /*
557 * We are not going to do the fast caching, set cached to the
558 * appropriate value and wakeup any waiters.
559 */
560 spin_lock(&cache->lock);
561 if (load_cache_only) {
562 cache->caching_ctl = NULL;
563 cache->cached = BTRFS_CACHE_NO;
564 } else {
565 cache->cached = BTRFS_CACHE_STARTED;
566 }
567 spin_unlock(&cache->lock);
568 wake_up(&caching_ctl->wait);
9d66e233
JB
569 }
570
291c7d2f
JB
571 if (load_cache_only) {
572 put_caching_control(caching_ctl);
11833d66 573 return 0;
817d52f8 574 }
817d52f8 575
11833d66 576 down_write(&fs_info->extent_commit_sem);
291c7d2f 577 atomic_inc(&caching_ctl->count);
11833d66
YZ
578 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
579 up_write(&fs_info->extent_commit_sem);
580
11dfe35a 581 btrfs_get_block_group(cache);
11833d66 582
bab39bf9 583 btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
817d52f8 584
ef8bbdfe 585 return ret;
e37c9e69
CM
586}
587
0f9dd46c
JB
588/*
589 * return the block group that starts at or after bytenr
590 */
d397712b
CM
591static struct btrfs_block_group_cache *
592btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
0ef3e66b 593{
0f9dd46c 594 struct btrfs_block_group_cache *cache;
0ef3e66b 595
0f9dd46c 596 cache = block_group_cache_tree_search(info, bytenr, 0);
0ef3e66b 597
0f9dd46c 598 return cache;
0ef3e66b
CM
599}
600
0f9dd46c 601/*
9f55684c 602 * return the block group that contains the given bytenr
0f9dd46c 603 */
d397712b
CM
604struct btrfs_block_group_cache *btrfs_lookup_block_group(
605 struct btrfs_fs_info *info,
606 u64 bytenr)
be744175 607{
0f9dd46c 608 struct btrfs_block_group_cache *cache;
be744175 609
0f9dd46c 610 cache = block_group_cache_tree_search(info, bytenr, 1);
96b5179d 611
0f9dd46c 612 return cache;
be744175 613}
0b86a832 614
0f9dd46c
JB
615static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
616 u64 flags)
6324fbf3 617{
0f9dd46c 618 struct list_head *head = &info->space_info;
0f9dd46c 619 struct btrfs_space_info *found;
4184ea7f 620
52ba6929 621 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
b742bb82 622
4184ea7f
CM
623 rcu_read_lock();
624 list_for_each_entry_rcu(found, head, list) {
67377734 625 if (found->flags & flags) {
4184ea7f 626 rcu_read_unlock();
0f9dd46c 627 return found;
4184ea7f 628 }
0f9dd46c 629 }
4184ea7f 630 rcu_read_unlock();
0f9dd46c 631 return NULL;
6324fbf3
CM
632}
633
4184ea7f
CM
634/*
635 * after adding space to the filesystem, we need to clear the full flags
636 * on all the space infos.
637 */
638void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
639{
640 struct list_head *head = &info->space_info;
641 struct btrfs_space_info *found;
642
643 rcu_read_lock();
644 list_for_each_entry_rcu(found, head, list)
645 found->full = 0;
646 rcu_read_unlock();
647}
648
80eb234a
JB
649static u64 div_factor(u64 num, int factor)
650{
651 if (factor == 10)
652 return num;
653 num *= factor;
654 do_div(num, 10);
655 return num;
656}
657
e5bc2458
CM
658static u64 div_factor_fine(u64 num, int factor)
659{
660 if (factor == 100)
661 return num;
662 num *= factor;
663 do_div(num, 100);
664 return num;
665}
666
d2fb3437
YZ
667u64 btrfs_find_block_group(struct btrfs_root *root,
668 u64 search_start, u64 search_hint, int owner)
cd1bc465 669{
96b5179d 670 struct btrfs_block_group_cache *cache;
cd1bc465 671 u64 used;
d2fb3437
YZ
672 u64 last = max(search_hint, search_start);
673 u64 group_start = 0;
31f3c99b 674 int full_search = 0;
d2fb3437 675 int factor = 9;
0ef3e66b 676 int wrapped = 0;
31f3c99b 677again:
e8569813
ZY
678 while (1) {
679 cache = btrfs_lookup_first_block_group(root->fs_info, last);
0f9dd46c
JB
680 if (!cache)
681 break;
96b5179d 682
c286ac48 683 spin_lock(&cache->lock);
96b5179d
CM
684 last = cache->key.objectid + cache->key.offset;
685 used = btrfs_block_group_used(&cache->item);
686
d2fb3437
YZ
687 if ((full_search || !cache->ro) &&
688 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
e8569813 689 if (used + cache->pinned + cache->reserved <
d2fb3437
YZ
690 div_factor(cache->key.offset, factor)) {
691 group_start = cache->key.objectid;
c286ac48 692 spin_unlock(&cache->lock);
fa9c0d79 693 btrfs_put_block_group(cache);
8790d502
CM
694 goto found;
695 }
6324fbf3 696 }
c286ac48 697 spin_unlock(&cache->lock);
fa9c0d79 698 btrfs_put_block_group(cache);
de428b63 699 cond_resched();
cd1bc465 700 }
0ef3e66b
CM
701 if (!wrapped) {
702 last = search_start;
703 wrapped = 1;
704 goto again;
705 }
706 if (!full_search && factor < 10) {
be744175 707 last = search_start;
31f3c99b 708 full_search = 1;
0ef3e66b 709 factor = 10;
31f3c99b
CM
710 goto again;
711 }
be744175 712found:
d2fb3437 713 return group_start;
925baedd 714}
0f9dd46c 715
e02119d5 716/* simple helper to search for an existing extent at a given offset */
31840ae1 717int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
e02119d5
CM
718{
719 int ret;
720 struct btrfs_key key;
31840ae1 721 struct btrfs_path *path;
e02119d5 722
31840ae1 723 path = btrfs_alloc_path();
d8926bb3
MF
724 if (!path)
725 return -ENOMEM;
726
e02119d5
CM
727 key.objectid = start;
728 key.offset = len;
729 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
730 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
731 0, 0);
31840ae1 732 btrfs_free_path(path);
7bb86316
CM
733 return ret;
734}
735
a22285a6
YZ
736/*
737 * helper function to lookup reference count and flags of extent.
738 *
739 * the head node for delayed ref is used to store the sum of all the
740 * reference count modifications queued up in the rbtree. the head
741 * node may also store the extent flags to set. This way you can check
742 * to see what the reference count and extent flags would be if all of
743 * the delayed refs are not processed.
744 */
745int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
746 struct btrfs_root *root, u64 bytenr,
747 u64 num_bytes, u64 *refs, u64 *flags)
748{
749 struct btrfs_delayed_ref_head *head;
750 struct btrfs_delayed_ref_root *delayed_refs;
751 struct btrfs_path *path;
752 struct btrfs_extent_item *ei;
753 struct extent_buffer *leaf;
754 struct btrfs_key key;
755 u32 item_size;
756 u64 num_refs;
757 u64 extent_flags;
758 int ret;
759
760 path = btrfs_alloc_path();
761 if (!path)
762 return -ENOMEM;
763
764 key.objectid = bytenr;
765 key.type = BTRFS_EXTENT_ITEM_KEY;
766 key.offset = num_bytes;
767 if (!trans) {
768 path->skip_locking = 1;
769 path->search_commit_root = 1;
770 }
771again:
772 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
773 &key, path, 0, 0);
774 if (ret < 0)
775 goto out_free;
776
777 if (ret == 0) {
778 leaf = path->nodes[0];
779 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
780 if (item_size >= sizeof(*ei)) {
781 ei = btrfs_item_ptr(leaf, path->slots[0],
782 struct btrfs_extent_item);
783 num_refs = btrfs_extent_refs(leaf, ei);
784 extent_flags = btrfs_extent_flags(leaf, ei);
785 } else {
786#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
787 struct btrfs_extent_item_v0 *ei0;
788 BUG_ON(item_size != sizeof(*ei0));
789 ei0 = btrfs_item_ptr(leaf, path->slots[0],
790 struct btrfs_extent_item_v0);
791 num_refs = btrfs_extent_refs_v0(leaf, ei0);
792 /* FIXME: this isn't correct for data */
793 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
794#else
795 BUG();
796#endif
797 }
798 BUG_ON(num_refs == 0);
799 } else {
800 num_refs = 0;
801 extent_flags = 0;
802 ret = 0;
803 }
804
805 if (!trans)
806 goto out;
807
808 delayed_refs = &trans->transaction->delayed_refs;
809 spin_lock(&delayed_refs->lock);
810 head = btrfs_find_delayed_ref_head(trans, bytenr);
811 if (head) {
812 if (!mutex_trylock(&head->mutex)) {
813 atomic_inc(&head->node.refs);
814 spin_unlock(&delayed_refs->lock);
815
b3b4aa74 816 btrfs_release_path(path);
a22285a6 817
8cc33e5c
DS
818 /*
819 * Mutex was contended, block until it's released and try
820 * again
821 */
a22285a6
YZ
822 mutex_lock(&head->mutex);
823 mutex_unlock(&head->mutex);
824 btrfs_put_delayed_ref(&head->node);
825 goto again;
826 }
827 if (head->extent_op && head->extent_op->update_flags)
828 extent_flags |= head->extent_op->flags_to_set;
829 else
830 BUG_ON(num_refs == 0);
831
832 num_refs += head->node.ref_mod;
833 mutex_unlock(&head->mutex);
834 }
835 spin_unlock(&delayed_refs->lock);
836out:
837 WARN_ON(num_refs == 0);
838 if (refs)
839 *refs = num_refs;
840 if (flags)
841 *flags = extent_flags;
842out_free:
843 btrfs_free_path(path);
844 return ret;
845}
846
d8d5f3e1
CM
847/*
848 * Back reference rules. Back refs have three main goals:
849 *
850 * 1) differentiate between all holders of references to an extent so that
851 * when a reference is dropped we can make sure it was a valid reference
852 * before freeing the extent.
853 *
854 * 2) Provide enough information to quickly find the holders of an extent
855 * if we notice a given block is corrupted or bad.
856 *
857 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
858 * maintenance. This is actually the same as #2, but with a slightly
859 * different use case.
860 *
5d4f98a2
YZ
861 * There are two kinds of back refs. The implicit back refs is optimized
862 * for pointers in non-shared tree blocks. For a given pointer in a block,
863 * back refs of this kind provide information about the block's owner tree
864 * and the pointer's key. These information allow us to find the block by
865 * b-tree searching. The full back refs is for pointers in tree blocks not
866 * referenced by their owner trees. The location of tree block is recorded
867 * in the back refs. Actually the full back refs is generic, and can be
868 * used in all cases the implicit back refs is used. The major shortcoming
869 * of the full back refs is its overhead. Every time a tree block gets
870 * COWed, we have to update back refs entry for all pointers in it.
871 *
872 * For a newly allocated tree block, we use implicit back refs for
873 * pointers in it. This means most tree related operations only involve
874 * implicit back refs. For a tree block created in old transaction, the
875 * only way to drop a reference to it is COW it. So we can detect the
876 * event that tree block loses its owner tree's reference and do the
877 * back refs conversion.
878 *
879 * When a tree block is COW'd through a tree, there are four cases:
880 *
881 * The reference count of the block is one and the tree is the block's
882 * owner tree. Nothing to do in this case.
883 *
884 * The reference count of the block is one and the tree is not the
885 * block's owner tree. In this case, full back refs is used for pointers
886 * in the block. Remove these full back refs, add implicit back refs for
887 * every pointers in the new block.
888 *
889 * The reference count of the block is greater than one and the tree is
890 * the block's owner tree. In this case, implicit back refs is used for
891 * pointers in the block. Add full back refs for every pointers in the
892 * block, increase lower level extents' reference counts. The original
893 * implicit back refs are entailed to the new block.
894 *
895 * The reference count of the block is greater than one and the tree is
896 * not the block's owner tree. Add implicit back refs for every pointer in
897 * the new block, increase lower level extents' reference count.
898 *
899 * Back Reference Key composing:
900 *
901 * The key objectid corresponds to the first byte in the extent,
902 * The key type is used to differentiate between types of back refs.
903 * There are different meanings of the key offset for different types
904 * of back refs.
905 *
d8d5f3e1
CM
906 * File extents can be referenced by:
907 *
908 * - multiple snapshots, subvolumes, or different generations in one subvol
31840ae1 909 * - different files inside a single subvolume
d8d5f3e1
CM
910 * - different offsets inside a file (bookend extents in file.c)
911 *
5d4f98a2 912 * The extent ref structure for the implicit back refs has fields for:
d8d5f3e1
CM
913 *
914 * - Objectid of the subvolume root
d8d5f3e1 915 * - objectid of the file holding the reference
5d4f98a2
YZ
916 * - original offset in the file
917 * - how many bookend extents
d8d5f3e1 918 *
5d4f98a2
YZ
919 * The key offset for the implicit back refs is hash of the first
920 * three fields.
d8d5f3e1 921 *
5d4f98a2 922 * The extent ref structure for the full back refs has field for:
d8d5f3e1 923 *
5d4f98a2 924 * - number of pointers in the tree leaf
d8d5f3e1 925 *
5d4f98a2
YZ
926 * The key offset for the implicit back refs is the first byte of
927 * the tree leaf
d8d5f3e1 928 *
5d4f98a2
YZ
929 * When a file extent is allocated, The implicit back refs is used.
930 * the fields are filled in:
d8d5f3e1 931 *
5d4f98a2 932 * (root_key.objectid, inode objectid, offset in file, 1)
d8d5f3e1 933 *
5d4f98a2
YZ
934 * When a file extent is removed file truncation, we find the
935 * corresponding implicit back refs and check the following fields:
d8d5f3e1 936 *
5d4f98a2 937 * (btrfs_header_owner(leaf), inode objectid, offset in file)
d8d5f3e1 938 *
5d4f98a2 939 * Btree extents can be referenced by:
d8d5f3e1 940 *
5d4f98a2 941 * - Different subvolumes
d8d5f3e1 942 *
5d4f98a2
YZ
943 * Both the implicit back refs and the full back refs for tree blocks
944 * only consist of key. The key offset for the implicit back refs is
945 * objectid of block's owner tree. The key offset for the full back refs
946 * is the first byte of parent block.
d8d5f3e1 947 *
5d4f98a2
YZ
948 * When implicit back refs is used, information about the lowest key and
949 * level of the tree block are required. These information are stored in
950 * tree block info structure.
d8d5f3e1 951 */
31840ae1 952
5d4f98a2
YZ
953#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
954static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
955 struct btrfs_root *root,
956 struct btrfs_path *path,
957 u64 owner, u32 extra_size)
7bb86316 958{
5d4f98a2
YZ
959 struct btrfs_extent_item *item;
960 struct btrfs_extent_item_v0 *ei0;
961 struct btrfs_extent_ref_v0 *ref0;
962 struct btrfs_tree_block_info *bi;
963 struct extent_buffer *leaf;
7bb86316 964 struct btrfs_key key;
5d4f98a2
YZ
965 struct btrfs_key found_key;
966 u32 new_size = sizeof(*item);
967 u64 refs;
968 int ret;
969
970 leaf = path->nodes[0];
971 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
972
973 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
974 ei0 = btrfs_item_ptr(leaf, path->slots[0],
975 struct btrfs_extent_item_v0);
976 refs = btrfs_extent_refs_v0(leaf, ei0);
977
978 if (owner == (u64)-1) {
979 while (1) {
980 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
981 ret = btrfs_next_leaf(root, path);
982 if (ret < 0)
983 return ret;
984 BUG_ON(ret > 0);
985 leaf = path->nodes[0];
986 }
987 btrfs_item_key_to_cpu(leaf, &found_key,
988 path->slots[0]);
989 BUG_ON(key.objectid != found_key.objectid);
990 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
991 path->slots[0]++;
992 continue;
993 }
994 ref0 = btrfs_item_ptr(leaf, path->slots[0],
995 struct btrfs_extent_ref_v0);
996 owner = btrfs_ref_objectid_v0(leaf, ref0);
997 break;
998 }
999 }
b3b4aa74 1000 btrfs_release_path(path);
5d4f98a2
YZ
1001
1002 if (owner < BTRFS_FIRST_FREE_OBJECTID)
1003 new_size += sizeof(*bi);
1004
1005 new_size -= sizeof(*ei0);
1006 ret = btrfs_search_slot(trans, root, &key, path,
1007 new_size + extra_size, 1);
1008 if (ret < 0)
1009 return ret;
1010 BUG_ON(ret);
1011
1012 ret = btrfs_extend_item(trans, root, path, new_size);
5d4f98a2
YZ
1013
1014 leaf = path->nodes[0];
1015 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1016 btrfs_set_extent_refs(leaf, item, refs);
1017 /* FIXME: get real generation */
1018 btrfs_set_extent_generation(leaf, item, 0);
1019 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1020 btrfs_set_extent_flags(leaf, item,
1021 BTRFS_EXTENT_FLAG_TREE_BLOCK |
1022 BTRFS_BLOCK_FLAG_FULL_BACKREF);
1023 bi = (struct btrfs_tree_block_info *)(item + 1);
1024 /* FIXME: get first key of the block */
1025 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1026 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1027 } else {
1028 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1029 }
1030 btrfs_mark_buffer_dirty(leaf);
1031 return 0;
1032}
1033#endif
1034
1035static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1036{
1037 u32 high_crc = ~(u32)0;
1038 u32 low_crc = ~(u32)0;
1039 __le64 lenum;
1040
1041 lenum = cpu_to_le64(root_objectid);
163e783e 1042 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
5d4f98a2 1043 lenum = cpu_to_le64(owner);
163e783e 1044 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
5d4f98a2 1045 lenum = cpu_to_le64(offset);
163e783e 1046 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
5d4f98a2
YZ
1047
1048 return ((u64)high_crc << 31) ^ (u64)low_crc;
1049}
1050
1051static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1052 struct btrfs_extent_data_ref *ref)
1053{
1054 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1055 btrfs_extent_data_ref_objectid(leaf, ref),
1056 btrfs_extent_data_ref_offset(leaf, ref));
1057}
1058
1059static int match_extent_data_ref(struct extent_buffer *leaf,
1060 struct btrfs_extent_data_ref *ref,
1061 u64 root_objectid, u64 owner, u64 offset)
1062{
1063 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1064 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1065 btrfs_extent_data_ref_offset(leaf, ref) != offset)
1066 return 0;
1067 return 1;
1068}
1069
1070static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1071 struct btrfs_root *root,
1072 struct btrfs_path *path,
1073 u64 bytenr, u64 parent,
1074 u64 root_objectid,
1075 u64 owner, u64 offset)
1076{
1077 struct btrfs_key key;
1078 struct btrfs_extent_data_ref *ref;
31840ae1 1079 struct extent_buffer *leaf;
5d4f98a2 1080 u32 nritems;
74493f7a 1081 int ret;
5d4f98a2
YZ
1082 int recow;
1083 int err = -ENOENT;
74493f7a 1084
31840ae1 1085 key.objectid = bytenr;
5d4f98a2
YZ
1086 if (parent) {
1087 key.type = BTRFS_SHARED_DATA_REF_KEY;
1088 key.offset = parent;
1089 } else {
1090 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1091 key.offset = hash_extent_data_ref(root_objectid,
1092 owner, offset);
1093 }
1094again:
1095 recow = 0;
1096 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1097 if (ret < 0) {
1098 err = ret;
1099 goto fail;
1100 }
31840ae1 1101
5d4f98a2
YZ
1102 if (parent) {
1103 if (!ret)
1104 return 0;
1105#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1106 key.type = BTRFS_EXTENT_REF_V0_KEY;
b3b4aa74 1107 btrfs_release_path(path);
5d4f98a2
YZ
1108 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1109 if (ret < 0) {
1110 err = ret;
1111 goto fail;
1112 }
1113 if (!ret)
1114 return 0;
1115#endif
1116 goto fail;
31840ae1
ZY
1117 }
1118
1119 leaf = path->nodes[0];
5d4f98a2
YZ
1120 nritems = btrfs_header_nritems(leaf);
1121 while (1) {
1122 if (path->slots[0] >= nritems) {
1123 ret = btrfs_next_leaf(root, path);
1124 if (ret < 0)
1125 err = ret;
1126 if (ret)
1127 goto fail;
1128
1129 leaf = path->nodes[0];
1130 nritems = btrfs_header_nritems(leaf);
1131 recow = 1;
1132 }
1133
1134 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1135 if (key.objectid != bytenr ||
1136 key.type != BTRFS_EXTENT_DATA_REF_KEY)
1137 goto fail;
1138
1139 ref = btrfs_item_ptr(leaf, path->slots[0],
1140 struct btrfs_extent_data_ref);
1141
1142 if (match_extent_data_ref(leaf, ref, root_objectid,
1143 owner, offset)) {
1144 if (recow) {
b3b4aa74 1145 btrfs_release_path(path);
5d4f98a2
YZ
1146 goto again;
1147 }
1148 err = 0;
1149 break;
1150 }
1151 path->slots[0]++;
31840ae1 1152 }
5d4f98a2
YZ
1153fail:
1154 return err;
31840ae1
ZY
1155}
1156
5d4f98a2
YZ
1157static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1158 struct btrfs_root *root,
1159 struct btrfs_path *path,
1160 u64 bytenr, u64 parent,
1161 u64 root_objectid, u64 owner,
1162 u64 offset, int refs_to_add)
31840ae1
ZY
1163{
1164 struct btrfs_key key;
1165 struct extent_buffer *leaf;
5d4f98a2 1166 u32 size;
31840ae1
ZY
1167 u32 num_refs;
1168 int ret;
74493f7a 1169
74493f7a 1170 key.objectid = bytenr;
5d4f98a2
YZ
1171 if (parent) {
1172 key.type = BTRFS_SHARED_DATA_REF_KEY;
1173 key.offset = parent;
1174 size = sizeof(struct btrfs_shared_data_ref);
1175 } else {
1176 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1177 key.offset = hash_extent_data_ref(root_objectid,
1178 owner, offset);
1179 size = sizeof(struct btrfs_extent_data_ref);
1180 }
74493f7a 1181
5d4f98a2
YZ
1182 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1183 if (ret && ret != -EEXIST)
1184 goto fail;
1185
1186 leaf = path->nodes[0];
1187 if (parent) {
1188 struct btrfs_shared_data_ref *ref;
31840ae1 1189 ref = btrfs_item_ptr(leaf, path->slots[0],
5d4f98a2
YZ
1190 struct btrfs_shared_data_ref);
1191 if (ret == 0) {
1192 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1193 } else {
1194 num_refs = btrfs_shared_data_ref_count(leaf, ref);
1195 num_refs += refs_to_add;
1196 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
31840ae1 1197 }
5d4f98a2
YZ
1198 } else {
1199 struct btrfs_extent_data_ref *ref;
1200 while (ret == -EEXIST) {
1201 ref = btrfs_item_ptr(leaf, path->slots[0],
1202 struct btrfs_extent_data_ref);
1203 if (match_extent_data_ref(leaf, ref, root_objectid,
1204 owner, offset))
1205 break;
b3b4aa74 1206 btrfs_release_path(path);
5d4f98a2
YZ
1207 key.offset++;
1208 ret = btrfs_insert_empty_item(trans, root, path, &key,
1209 size);
1210 if (ret && ret != -EEXIST)
1211 goto fail;
31840ae1 1212
5d4f98a2
YZ
1213 leaf = path->nodes[0];
1214 }
1215 ref = btrfs_item_ptr(leaf, path->slots[0],
1216 struct btrfs_extent_data_ref);
1217 if (ret == 0) {
1218 btrfs_set_extent_data_ref_root(leaf, ref,
1219 root_objectid);
1220 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1221 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1222 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1223 } else {
1224 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1225 num_refs += refs_to_add;
1226 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
31840ae1 1227 }
31840ae1 1228 }
5d4f98a2
YZ
1229 btrfs_mark_buffer_dirty(leaf);
1230 ret = 0;
1231fail:
b3b4aa74 1232 btrfs_release_path(path);
7bb86316 1233 return ret;
74493f7a
CM
1234}
1235
5d4f98a2
YZ
1236static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1237 struct btrfs_root *root,
1238 struct btrfs_path *path,
1239 int refs_to_drop)
31840ae1 1240{
5d4f98a2
YZ
1241 struct btrfs_key key;
1242 struct btrfs_extent_data_ref *ref1 = NULL;
1243 struct btrfs_shared_data_ref *ref2 = NULL;
31840ae1 1244 struct extent_buffer *leaf;
5d4f98a2 1245 u32 num_refs = 0;
31840ae1
ZY
1246 int ret = 0;
1247
1248 leaf = path->nodes[0];
5d4f98a2
YZ
1249 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1250
1251 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1252 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1253 struct btrfs_extent_data_ref);
1254 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1255 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1256 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1257 struct btrfs_shared_data_ref);
1258 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1259#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1260 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1261 struct btrfs_extent_ref_v0 *ref0;
1262 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1263 struct btrfs_extent_ref_v0);
1264 num_refs = btrfs_ref_count_v0(leaf, ref0);
1265#endif
1266 } else {
1267 BUG();
1268 }
1269
56bec294
CM
1270 BUG_ON(num_refs < refs_to_drop);
1271 num_refs -= refs_to_drop;
5d4f98a2 1272
31840ae1
ZY
1273 if (num_refs == 0) {
1274 ret = btrfs_del_item(trans, root, path);
1275 } else {
5d4f98a2
YZ
1276 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1277 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1278 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1279 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1280#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1281 else {
1282 struct btrfs_extent_ref_v0 *ref0;
1283 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1284 struct btrfs_extent_ref_v0);
1285 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1286 }
1287#endif
31840ae1
ZY
1288 btrfs_mark_buffer_dirty(leaf);
1289 }
31840ae1
ZY
1290 return ret;
1291}
1292
5d4f98a2
YZ
1293static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1294 struct btrfs_path *path,
1295 struct btrfs_extent_inline_ref *iref)
15916de8 1296{
5d4f98a2
YZ
1297 struct btrfs_key key;
1298 struct extent_buffer *leaf;
1299 struct btrfs_extent_data_ref *ref1;
1300 struct btrfs_shared_data_ref *ref2;
1301 u32 num_refs = 0;
1302
1303 leaf = path->nodes[0];
1304 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1305 if (iref) {
1306 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1307 BTRFS_EXTENT_DATA_REF_KEY) {
1308 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1309 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1310 } else {
1311 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1312 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1313 }
1314 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1315 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1316 struct btrfs_extent_data_ref);
1317 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1318 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1319 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1320 struct btrfs_shared_data_ref);
1321 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1322#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1323 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1324 struct btrfs_extent_ref_v0 *ref0;
1325 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1326 struct btrfs_extent_ref_v0);
1327 num_refs = btrfs_ref_count_v0(leaf, ref0);
4b4e25f2 1328#endif
5d4f98a2
YZ
1329 } else {
1330 WARN_ON(1);
1331 }
1332 return num_refs;
1333}
15916de8 1334
5d4f98a2
YZ
1335static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1336 struct btrfs_root *root,
1337 struct btrfs_path *path,
1338 u64 bytenr, u64 parent,
1339 u64 root_objectid)
1f3c79a2 1340{
5d4f98a2 1341 struct btrfs_key key;
1f3c79a2 1342 int ret;
1f3c79a2 1343
5d4f98a2
YZ
1344 key.objectid = bytenr;
1345 if (parent) {
1346 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1347 key.offset = parent;
1348 } else {
1349 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1350 key.offset = root_objectid;
1f3c79a2
LH
1351 }
1352
5d4f98a2
YZ
1353 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1354 if (ret > 0)
1355 ret = -ENOENT;
1356#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357 if (ret == -ENOENT && parent) {
b3b4aa74 1358 btrfs_release_path(path);
5d4f98a2
YZ
1359 key.type = BTRFS_EXTENT_REF_V0_KEY;
1360 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1361 if (ret > 0)
1362 ret = -ENOENT;
1363 }
1f3c79a2 1364#endif
5d4f98a2 1365 return ret;
1f3c79a2
LH
1366}
1367
5d4f98a2
YZ
1368static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1369 struct btrfs_root *root,
1370 struct btrfs_path *path,
1371 u64 bytenr, u64 parent,
1372 u64 root_objectid)
31840ae1 1373{
5d4f98a2 1374 struct btrfs_key key;
31840ae1 1375 int ret;
31840ae1 1376
5d4f98a2
YZ
1377 key.objectid = bytenr;
1378 if (parent) {
1379 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1380 key.offset = parent;
1381 } else {
1382 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1383 key.offset = root_objectid;
1384 }
1385
1386 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
b3b4aa74 1387 btrfs_release_path(path);
31840ae1
ZY
1388 return ret;
1389}
1390
5d4f98a2 1391static inline int extent_ref_type(u64 parent, u64 owner)
31840ae1 1392{
5d4f98a2
YZ
1393 int type;
1394 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1395 if (parent > 0)
1396 type = BTRFS_SHARED_BLOCK_REF_KEY;
1397 else
1398 type = BTRFS_TREE_BLOCK_REF_KEY;
1399 } else {
1400 if (parent > 0)
1401 type = BTRFS_SHARED_DATA_REF_KEY;
1402 else
1403 type = BTRFS_EXTENT_DATA_REF_KEY;
1404 }
1405 return type;
31840ae1 1406}
56bec294 1407
2c47e605
YZ
1408static int find_next_key(struct btrfs_path *path, int level,
1409 struct btrfs_key *key)
56bec294 1410
02217ed2 1411{
2c47e605 1412 for (; level < BTRFS_MAX_LEVEL; level++) {
5d4f98a2
YZ
1413 if (!path->nodes[level])
1414 break;
5d4f98a2
YZ
1415 if (path->slots[level] + 1 >=
1416 btrfs_header_nritems(path->nodes[level]))
1417 continue;
1418 if (level == 0)
1419 btrfs_item_key_to_cpu(path->nodes[level], key,
1420 path->slots[level] + 1);
1421 else
1422 btrfs_node_key_to_cpu(path->nodes[level], key,
1423 path->slots[level] + 1);
1424 return 0;
1425 }
1426 return 1;
1427}
037e6390 1428
5d4f98a2
YZ
1429/*
1430 * look for inline back ref. if back ref is found, *ref_ret is set
1431 * to the address of inline back ref, and 0 is returned.
1432 *
1433 * if back ref isn't found, *ref_ret is set to the address where it
1434 * should be inserted, and -ENOENT is returned.
1435 *
1436 * if insert is true and there are too many inline back refs, the path
1437 * points to the extent item, and -EAGAIN is returned.
1438 *
1439 * NOTE: inline back refs are ordered in the same way that back ref
1440 * items in the tree are ordered.
1441 */
1442static noinline_for_stack
1443int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1444 struct btrfs_root *root,
1445 struct btrfs_path *path,
1446 struct btrfs_extent_inline_ref **ref_ret,
1447 u64 bytenr, u64 num_bytes,
1448 u64 parent, u64 root_objectid,
1449 u64 owner, u64 offset, int insert)
1450{
1451 struct btrfs_key key;
1452 struct extent_buffer *leaf;
1453 struct btrfs_extent_item *ei;
1454 struct btrfs_extent_inline_ref *iref;
1455 u64 flags;
1456 u64 item_size;
1457 unsigned long ptr;
1458 unsigned long end;
1459 int extra_size;
1460 int type;
1461 int want;
1462 int ret;
1463 int err = 0;
26b8003f 1464
db94535d 1465 key.objectid = bytenr;
31840ae1 1466 key.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 1467 key.offset = num_bytes;
31840ae1 1468
5d4f98a2
YZ
1469 want = extent_ref_type(parent, owner);
1470 if (insert) {
1471 extra_size = btrfs_extent_inline_ref_size(want);
85d4198e 1472 path->keep_locks = 1;
5d4f98a2
YZ
1473 } else
1474 extra_size = -1;
1475 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
b9473439 1476 if (ret < 0) {
5d4f98a2
YZ
1477 err = ret;
1478 goto out;
1479 }
1480 BUG_ON(ret);
1481
1482 leaf = path->nodes[0];
1483 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1484#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1485 if (item_size < sizeof(*ei)) {
1486 if (!insert) {
1487 err = -ENOENT;
1488 goto out;
1489 }
1490 ret = convert_extent_item_v0(trans, root, path, owner,
1491 extra_size);
1492 if (ret < 0) {
1493 err = ret;
1494 goto out;
1495 }
1496 leaf = path->nodes[0];
1497 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1498 }
1499#endif
1500 BUG_ON(item_size < sizeof(*ei));
1501
5d4f98a2
YZ
1502 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1503 flags = btrfs_extent_flags(leaf, ei);
1504
1505 ptr = (unsigned long)(ei + 1);
1506 end = (unsigned long)ei + item_size;
1507
1508 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1509 ptr += sizeof(struct btrfs_tree_block_info);
1510 BUG_ON(ptr > end);
1511 } else {
1512 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1513 }
1514
1515 err = -ENOENT;
1516 while (1) {
1517 if (ptr >= end) {
1518 WARN_ON(ptr > end);
1519 break;
1520 }
1521 iref = (struct btrfs_extent_inline_ref *)ptr;
1522 type = btrfs_extent_inline_ref_type(leaf, iref);
1523 if (want < type)
1524 break;
1525 if (want > type) {
1526 ptr += btrfs_extent_inline_ref_size(type);
1527 continue;
1528 }
1529
1530 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1531 struct btrfs_extent_data_ref *dref;
1532 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1533 if (match_extent_data_ref(leaf, dref, root_objectid,
1534 owner, offset)) {
1535 err = 0;
1536 break;
1537 }
1538 if (hash_extent_data_ref_item(leaf, dref) <
1539 hash_extent_data_ref(root_objectid, owner, offset))
1540 break;
1541 } else {
1542 u64 ref_offset;
1543 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1544 if (parent > 0) {
1545 if (parent == ref_offset) {
1546 err = 0;
1547 break;
1548 }
1549 if (ref_offset < parent)
1550 break;
1551 } else {
1552 if (root_objectid == ref_offset) {
1553 err = 0;
1554 break;
1555 }
1556 if (ref_offset < root_objectid)
1557 break;
1558 }
1559 }
1560 ptr += btrfs_extent_inline_ref_size(type);
1561 }
1562 if (err == -ENOENT && insert) {
1563 if (item_size + extra_size >=
1564 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1565 err = -EAGAIN;
1566 goto out;
1567 }
1568 /*
1569 * To add new inline back ref, we have to make sure
1570 * there is no corresponding back ref item.
1571 * For simplicity, we just do not add new inline back
1572 * ref if there is any kind of item for this block
1573 */
2c47e605
YZ
1574 if (find_next_key(path, 0, &key) == 0 &&
1575 key.objectid == bytenr &&
85d4198e 1576 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
5d4f98a2
YZ
1577 err = -EAGAIN;
1578 goto out;
1579 }
1580 }
1581 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1582out:
85d4198e 1583 if (insert) {
5d4f98a2
YZ
1584 path->keep_locks = 0;
1585 btrfs_unlock_up_safe(path, 1);
1586 }
1587 return err;
1588}
1589
1590/*
1591 * helper to add new inline back ref
1592 */
1593static noinline_for_stack
1594int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root,
1596 struct btrfs_path *path,
1597 struct btrfs_extent_inline_ref *iref,
1598 u64 parent, u64 root_objectid,
1599 u64 owner, u64 offset, int refs_to_add,
1600 struct btrfs_delayed_extent_op *extent_op)
1601{
1602 struct extent_buffer *leaf;
1603 struct btrfs_extent_item *ei;
1604 unsigned long ptr;
1605 unsigned long end;
1606 unsigned long item_offset;
1607 u64 refs;
1608 int size;
1609 int type;
1610 int ret;
1611
1612 leaf = path->nodes[0];
1613 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1614 item_offset = (unsigned long)iref - (unsigned long)ei;
1615
1616 type = extent_ref_type(parent, owner);
1617 size = btrfs_extent_inline_ref_size(type);
1618
1619 ret = btrfs_extend_item(trans, root, path, size);
5d4f98a2
YZ
1620
1621 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1622 refs = btrfs_extent_refs(leaf, ei);
1623 refs += refs_to_add;
1624 btrfs_set_extent_refs(leaf, ei, refs);
1625 if (extent_op)
1626 __run_delayed_extent_op(extent_op, leaf, ei);
1627
1628 ptr = (unsigned long)ei + item_offset;
1629 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1630 if (ptr < end - size)
1631 memmove_extent_buffer(leaf, ptr + size, ptr,
1632 end - size - ptr);
1633
1634 iref = (struct btrfs_extent_inline_ref *)ptr;
1635 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1636 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1637 struct btrfs_extent_data_ref *dref;
1638 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1639 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1640 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1641 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1642 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1643 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1644 struct btrfs_shared_data_ref *sref;
1645 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1646 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1647 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1648 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1649 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1650 } else {
1651 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1652 }
1653 btrfs_mark_buffer_dirty(leaf);
1654 return 0;
1655}
1656
1657static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1658 struct btrfs_root *root,
1659 struct btrfs_path *path,
1660 struct btrfs_extent_inline_ref **ref_ret,
1661 u64 bytenr, u64 num_bytes, u64 parent,
1662 u64 root_objectid, u64 owner, u64 offset)
1663{
1664 int ret;
1665
1666 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1667 bytenr, num_bytes, parent,
1668 root_objectid, owner, offset, 0);
1669 if (ret != -ENOENT)
54aa1f4d 1670 return ret;
5d4f98a2 1671
b3b4aa74 1672 btrfs_release_path(path);
5d4f98a2
YZ
1673 *ref_ret = NULL;
1674
1675 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1676 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1677 root_objectid);
1678 } else {
1679 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1680 root_objectid, owner, offset);
b9473439 1681 }
5d4f98a2
YZ
1682 return ret;
1683}
31840ae1 1684
5d4f98a2
YZ
1685/*
1686 * helper to update/remove inline back ref
1687 */
1688static noinline_for_stack
1689int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1690 struct btrfs_root *root,
1691 struct btrfs_path *path,
1692 struct btrfs_extent_inline_ref *iref,
1693 int refs_to_mod,
1694 struct btrfs_delayed_extent_op *extent_op)
1695{
1696 struct extent_buffer *leaf;
1697 struct btrfs_extent_item *ei;
1698 struct btrfs_extent_data_ref *dref = NULL;
1699 struct btrfs_shared_data_ref *sref = NULL;
1700 unsigned long ptr;
1701 unsigned long end;
1702 u32 item_size;
1703 int size;
1704 int type;
1705 int ret;
1706 u64 refs;
1707
1708 leaf = path->nodes[0];
1709 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1710 refs = btrfs_extent_refs(leaf, ei);
1711 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1712 refs += refs_to_mod;
1713 btrfs_set_extent_refs(leaf, ei, refs);
1714 if (extent_op)
1715 __run_delayed_extent_op(extent_op, leaf, ei);
1716
1717 type = btrfs_extent_inline_ref_type(leaf, iref);
1718
1719 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1720 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1721 refs = btrfs_extent_data_ref_count(leaf, dref);
1722 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1723 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1724 refs = btrfs_shared_data_ref_count(leaf, sref);
1725 } else {
1726 refs = 1;
1727 BUG_ON(refs_to_mod != -1);
56bec294 1728 }
31840ae1 1729
5d4f98a2
YZ
1730 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1731 refs += refs_to_mod;
1732
1733 if (refs > 0) {
1734 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1735 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1736 else
1737 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1738 } else {
1739 size = btrfs_extent_inline_ref_size(type);
1740 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1741 ptr = (unsigned long)iref;
1742 end = (unsigned long)ei + item_size;
1743 if (ptr + size < end)
1744 memmove_extent_buffer(leaf, ptr, ptr + size,
1745 end - ptr - size);
1746 item_size -= size;
1747 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
5d4f98a2
YZ
1748 }
1749 btrfs_mark_buffer_dirty(leaf);
1750 return 0;
1751}
1752
1753static noinline_for_stack
1754int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1755 struct btrfs_root *root,
1756 struct btrfs_path *path,
1757 u64 bytenr, u64 num_bytes, u64 parent,
1758 u64 root_objectid, u64 owner,
1759 u64 offset, int refs_to_add,
1760 struct btrfs_delayed_extent_op *extent_op)
1761{
1762 struct btrfs_extent_inline_ref *iref;
1763 int ret;
1764
1765 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1766 bytenr, num_bytes, parent,
1767 root_objectid, owner, offset, 1);
1768 if (ret == 0) {
1769 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1770 ret = update_inline_extent_backref(trans, root, path, iref,
1771 refs_to_add, extent_op);
1772 } else if (ret == -ENOENT) {
1773 ret = setup_inline_extent_backref(trans, root, path, iref,
1774 parent, root_objectid,
1775 owner, offset, refs_to_add,
1776 extent_op);
771ed689 1777 }
5d4f98a2
YZ
1778 return ret;
1779}
31840ae1 1780
5d4f98a2
YZ
1781static int insert_extent_backref(struct btrfs_trans_handle *trans,
1782 struct btrfs_root *root,
1783 struct btrfs_path *path,
1784 u64 bytenr, u64 parent, u64 root_objectid,
1785 u64 owner, u64 offset, int refs_to_add)
1786{
1787 int ret;
1788 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1789 BUG_ON(refs_to_add != 1);
1790 ret = insert_tree_block_ref(trans, root, path, bytenr,
1791 parent, root_objectid);
1792 } else {
1793 ret = insert_extent_data_ref(trans, root, path, bytenr,
1794 parent, root_objectid,
1795 owner, offset, refs_to_add);
1796 }
1797 return ret;
1798}
56bec294 1799
5d4f98a2
YZ
1800static int remove_extent_backref(struct btrfs_trans_handle *trans,
1801 struct btrfs_root *root,
1802 struct btrfs_path *path,
1803 struct btrfs_extent_inline_ref *iref,
1804 int refs_to_drop, int is_data)
1805{
1806 int ret;
b9473439 1807
5d4f98a2
YZ
1808 BUG_ON(!is_data && refs_to_drop != 1);
1809 if (iref) {
1810 ret = update_inline_extent_backref(trans, root, path, iref,
1811 -refs_to_drop, NULL);
1812 } else if (is_data) {
1813 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1814 } else {
1815 ret = btrfs_del_item(trans, root, path);
1816 }
1817 return ret;
1818}
1819
5378e607 1820static int btrfs_issue_discard(struct block_device *bdev,
5d4f98a2
YZ
1821 u64 start, u64 len)
1822{
5378e607 1823 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
5d4f98a2 1824}
5d4f98a2
YZ
1825
1826static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
5378e607 1827 u64 num_bytes, u64 *actual_bytes)
5d4f98a2 1828{
5d4f98a2 1829 int ret;
5378e607 1830 u64 discarded_bytes = 0;
a1d3c478 1831 struct btrfs_bio *bbio = NULL;
5d4f98a2 1832
e244a0ae 1833
5d4f98a2 1834 /* Tell the block device(s) that the sectors can be discarded */
5378e607 1835 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
a1d3c478 1836 bytenr, &num_bytes, &bbio, 0);
5d4f98a2 1837 if (!ret) {
a1d3c478 1838 struct btrfs_bio_stripe *stripe = bbio->stripes;
5d4f98a2
YZ
1839 int i;
1840
5d4f98a2 1841
a1d3c478 1842 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
d5e2003c
JB
1843 if (!stripe->dev->can_discard)
1844 continue;
1845
5378e607
LD
1846 ret = btrfs_issue_discard(stripe->dev->bdev,
1847 stripe->physical,
1848 stripe->length);
1849 if (!ret)
1850 discarded_bytes += stripe->length;
1851 else if (ret != -EOPNOTSUPP)
1852 break;
d5e2003c
JB
1853
1854 /*
1855 * Just in case we get back EOPNOTSUPP for some reason,
1856 * just ignore the return value so we don't screw up
1857 * people calling discard_extent.
1858 */
1859 ret = 0;
5d4f98a2 1860 }
a1d3c478 1861 kfree(bbio);
5d4f98a2 1862 }
5378e607
LD
1863
1864 if (actual_bytes)
1865 *actual_bytes = discarded_bytes;
1866
5d4f98a2
YZ
1867
1868 return ret;
5d4f98a2
YZ
1869}
1870
1871int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1872 struct btrfs_root *root,
1873 u64 bytenr, u64 num_bytes, u64 parent,
66d7e7f0 1874 u64 root_objectid, u64 owner, u64 offset, int for_cow)
5d4f98a2
YZ
1875{
1876 int ret;
66d7e7f0
AJ
1877 struct btrfs_fs_info *fs_info = root->fs_info;
1878
5d4f98a2
YZ
1879 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1880 root_objectid == BTRFS_TREE_LOG_OBJECTID);
1881
1882 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
66d7e7f0
AJ
1883 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1884 num_bytes,
5d4f98a2 1885 parent, root_objectid, (int)owner,
66d7e7f0 1886 BTRFS_ADD_DELAYED_REF, NULL, for_cow);
5d4f98a2 1887 } else {
66d7e7f0
AJ
1888 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1889 num_bytes,
5d4f98a2 1890 parent, root_objectid, owner, offset,
66d7e7f0 1891 BTRFS_ADD_DELAYED_REF, NULL, for_cow);
5d4f98a2
YZ
1892 }
1893 return ret;
1894}
1895
1896static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1897 struct btrfs_root *root,
1898 u64 bytenr, u64 num_bytes,
1899 u64 parent, u64 root_objectid,
1900 u64 owner, u64 offset, int refs_to_add,
1901 struct btrfs_delayed_extent_op *extent_op)
1902{
1903 struct btrfs_path *path;
1904 struct extent_buffer *leaf;
1905 struct btrfs_extent_item *item;
1906 u64 refs;
1907 int ret;
1908 int err = 0;
1909
1910 path = btrfs_alloc_path();
1911 if (!path)
1912 return -ENOMEM;
1913
1914 path->reada = 1;
1915 path->leave_spinning = 1;
1916 /* this will setup the path even if it fails to insert the back ref */
1917 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1918 path, bytenr, num_bytes, parent,
1919 root_objectid, owner, offset,
1920 refs_to_add, extent_op);
1921 if (ret == 0)
1922 goto out;
1923
1924 if (ret != -EAGAIN) {
1925 err = ret;
1926 goto out;
1927 }
1928
1929 leaf = path->nodes[0];
1930 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1931 refs = btrfs_extent_refs(leaf, item);
1932 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1933 if (extent_op)
1934 __run_delayed_extent_op(extent_op, leaf, item);
56bec294 1935
5d4f98a2 1936 btrfs_mark_buffer_dirty(leaf);
b3b4aa74 1937 btrfs_release_path(path);
56bec294
CM
1938
1939 path->reada = 1;
b9473439
CM
1940 path->leave_spinning = 1;
1941
56bec294
CM
1942 /* now insert the actual backref */
1943 ret = insert_extent_backref(trans, root->fs_info->extent_root,
5d4f98a2
YZ
1944 path, bytenr, parent, root_objectid,
1945 owner, offset, refs_to_add);
56bec294 1946 BUG_ON(ret);
5d4f98a2 1947out:
56bec294 1948 btrfs_free_path(path);
5d4f98a2 1949 return err;
56bec294
CM
1950}
1951
5d4f98a2
YZ
1952static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1953 struct btrfs_root *root,
1954 struct btrfs_delayed_ref_node *node,
1955 struct btrfs_delayed_extent_op *extent_op,
1956 int insert_reserved)
56bec294 1957{
5d4f98a2
YZ
1958 int ret = 0;
1959 struct btrfs_delayed_data_ref *ref;
1960 struct btrfs_key ins;
1961 u64 parent = 0;
1962 u64 ref_root = 0;
1963 u64 flags = 0;
1964
1965 ins.objectid = node->bytenr;
1966 ins.offset = node->num_bytes;
1967 ins.type = BTRFS_EXTENT_ITEM_KEY;
1968
1969 ref = btrfs_delayed_node_to_data_ref(node);
1970 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1971 parent = ref->parent;
1972 else
1973 ref_root = ref->root;
1974
1975 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1976 if (extent_op) {
1977 BUG_ON(extent_op->update_key);
1978 flags |= extent_op->flags_to_set;
1979 }
1980 ret = alloc_reserved_file_extent(trans, root,
1981 parent, ref_root, flags,
1982 ref->objectid, ref->offset,
1983 &ins, node->ref_mod);
5d4f98a2
YZ
1984 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1985 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1986 node->num_bytes, parent,
1987 ref_root, ref->objectid,
1988 ref->offset, node->ref_mod,
1989 extent_op);
1990 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1991 ret = __btrfs_free_extent(trans, root, node->bytenr,
1992 node->num_bytes, parent,
1993 ref_root, ref->objectid,
1994 ref->offset, node->ref_mod,
1995 extent_op);
1996 } else {
1997 BUG();
1998 }
1999 return ret;
2000}
2001
2002static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2003 struct extent_buffer *leaf,
2004 struct btrfs_extent_item *ei)
2005{
2006 u64 flags = btrfs_extent_flags(leaf, ei);
2007 if (extent_op->update_flags) {
2008 flags |= extent_op->flags_to_set;
2009 btrfs_set_extent_flags(leaf, ei, flags);
2010 }
2011
2012 if (extent_op->update_key) {
2013 struct btrfs_tree_block_info *bi;
2014 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2015 bi = (struct btrfs_tree_block_info *)(ei + 1);
2016 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2017 }
2018}
2019
2020static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2021 struct btrfs_root *root,
2022 struct btrfs_delayed_ref_node *node,
2023 struct btrfs_delayed_extent_op *extent_op)
2024{
2025 struct btrfs_key key;
2026 struct btrfs_path *path;
2027 struct btrfs_extent_item *ei;
2028 struct extent_buffer *leaf;
2029 u32 item_size;
56bec294 2030 int ret;
5d4f98a2
YZ
2031 int err = 0;
2032
2033 path = btrfs_alloc_path();
2034 if (!path)
2035 return -ENOMEM;
2036
2037 key.objectid = node->bytenr;
2038 key.type = BTRFS_EXTENT_ITEM_KEY;
2039 key.offset = node->num_bytes;
2040
2041 path->reada = 1;
2042 path->leave_spinning = 1;
2043 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2044 path, 0, 1);
2045 if (ret < 0) {
2046 err = ret;
2047 goto out;
2048 }
2049 if (ret > 0) {
2050 err = -EIO;
2051 goto out;
2052 }
2053
2054 leaf = path->nodes[0];
2055 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2056#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2057 if (item_size < sizeof(*ei)) {
2058 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2059 path, (u64)-1, 0);
2060 if (ret < 0) {
2061 err = ret;
2062 goto out;
2063 }
2064 leaf = path->nodes[0];
2065 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2066 }
2067#endif
2068 BUG_ON(item_size < sizeof(*ei));
2069 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2070 __run_delayed_extent_op(extent_op, leaf, ei);
56bec294 2071
5d4f98a2
YZ
2072 btrfs_mark_buffer_dirty(leaf);
2073out:
2074 btrfs_free_path(path);
2075 return err;
56bec294
CM
2076}
2077
5d4f98a2
YZ
2078static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2079 struct btrfs_root *root,
2080 struct btrfs_delayed_ref_node *node,
2081 struct btrfs_delayed_extent_op *extent_op,
2082 int insert_reserved)
56bec294
CM
2083{
2084 int ret = 0;
5d4f98a2
YZ
2085 struct btrfs_delayed_tree_ref *ref;
2086 struct btrfs_key ins;
2087 u64 parent = 0;
2088 u64 ref_root = 0;
56bec294 2089
5d4f98a2
YZ
2090 ins.objectid = node->bytenr;
2091 ins.offset = node->num_bytes;
2092 ins.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 2093
5d4f98a2
YZ
2094 ref = btrfs_delayed_node_to_tree_ref(node);
2095 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2096 parent = ref->parent;
2097 else
2098 ref_root = ref->root;
2099
2100 BUG_ON(node->ref_mod != 1);
2101 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2102 BUG_ON(!extent_op || !extent_op->update_flags ||
2103 !extent_op->update_key);
2104 ret = alloc_reserved_tree_block(trans, root,
2105 parent, ref_root,
2106 extent_op->flags_to_set,
2107 &extent_op->key,
2108 ref->level, &ins);
5d4f98a2
YZ
2109 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2110 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2111 node->num_bytes, parent, ref_root,
2112 ref->level, 0, 1, extent_op);
2113 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2114 ret = __btrfs_free_extent(trans, root, node->bytenr,
2115 node->num_bytes, parent, ref_root,
2116 ref->level, 0, 1, extent_op);
2117 } else {
2118 BUG();
2119 }
56bec294
CM
2120 return ret;
2121}
2122
2123/* helper function to actually process a single delayed ref entry */
5d4f98a2
YZ
2124static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2125 struct btrfs_root *root,
2126 struct btrfs_delayed_ref_node *node,
2127 struct btrfs_delayed_extent_op *extent_op,
2128 int insert_reserved)
56bec294
CM
2129{
2130 int ret;
5d4f98a2 2131 if (btrfs_delayed_ref_is_head(node)) {
56bec294
CM
2132 struct btrfs_delayed_ref_head *head;
2133 /*
2134 * we've hit the end of the chain and we were supposed
2135 * to insert this extent into the tree. But, it got
2136 * deleted before we ever needed to insert it, so all
2137 * we have to do is clean up the accounting
2138 */
5d4f98a2
YZ
2139 BUG_ON(extent_op);
2140 head = btrfs_delayed_node_to_head(node);
56bec294 2141 if (insert_reserved) {
f0486c68
YZ
2142 btrfs_pin_extent(root, node->bytenr,
2143 node->num_bytes, 1);
5d4f98a2
YZ
2144 if (head->is_data) {
2145 ret = btrfs_del_csums(trans, root,
2146 node->bytenr,
2147 node->num_bytes);
2148 BUG_ON(ret);
2149 }
56bec294 2150 }
56bec294
CM
2151 mutex_unlock(&head->mutex);
2152 return 0;
2153 }
2154
5d4f98a2
YZ
2155 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2156 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2157 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2158 insert_reserved);
2159 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2160 node->type == BTRFS_SHARED_DATA_REF_KEY)
2161 ret = run_delayed_data_ref(trans, root, node, extent_op,
2162 insert_reserved);
2163 else
2164 BUG();
2165 return ret;
56bec294
CM
2166}
2167
2168static noinline struct btrfs_delayed_ref_node *
2169select_delayed_ref(struct btrfs_delayed_ref_head *head)
2170{
2171 struct rb_node *node;
2172 struct btrfs_delayed_ref_node *ref;
2173 int action = BTRFS_ADD_DELAYED_REF;
2174again:
2175 /*
2176 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2177 * this prevents ref count from going down to zero when
2178 * there still are pending delayed ref.
2179 */
2180 node = rb_prev(&head->node.rb_node);
2181 while (1) {
2182 if (!node)
2183 break;
2184 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2185 rb_node);
2186 if (ref->bytenr != head->node.bytenr)
2187 break;
5d4f98a2 2188 if (ref->action == action)
56bec294
CM
2189 return ref;
2190 node = rb_prev(node);
2191 }
2192 if (action == BTRFS_ADD_DELAYED_REF) {
2193 action = BTRFS_DROP_DELAYED_REF;
2194 goto again;
2195 }
2196 return NULL;
2197}
2198
c3e69d58
CM
2199static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2200 struct btrfs_root *root,
2201 struct list_head *cluster)
56bec294 2202{
56bec294
CM
2203 struct btrfs_delayed_ref_root *delayed_refs;
2204 struct btrfs_delayed_ref_node *ref;
2205 struct btrfs_delayed_ref_head *locked_ref = NULL;
5d4f98a2 2206 struct btrfs_delayed_extent_op *extent_op;
56bec294 2207 int ret;
c3e69d58 2208 int count = 0;
56bec294 2209 int must_insert_reserved = 0;
56bec294
CM
2210
2211 delayed_refs = &trans->transaction->delayed_refs;
56bec294
CM
2212 while (1) {
2213 if (!locked_ref) {
c3e69d58
CM
2214 /* pick a new head ref from the cluster list */
2215 if (list_empty(cluster))
56bec294 2216 break;
56bec294 2217
c3e69d58
CM
2218 locked_ref = list_entry(cluster->next,
2219 struct btrfs_delayed_ref_head, cluster);
2220
2221 /* grab the lock that says we are going to process
2222 * all the refs for this head */
2223 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2224
2225 /*
2226 * we may have dropped the spin lock to get the head
2227 * mutex lock, and that might have given someone else
2228 * time to free the head. If that's true, it has been
2229 * removed from our list and we can move on.
2230 */
2231 if (ret == -EAGAIN) {
2232 locked_ref = NULL;
2233 count++;
2234 continue;
56bec294
CM
2235 }
2236 }
a28ec197 2237
d1270cd9
AJ
2238 /*
2239 * locked_ref is the head node, so we have to go one
2240 * node back for any delayed ref updates
2241 */
2242 ref = select_delayed_ref(locked_ref);
2243
2244 if (ref && ref->seq &&
2245 btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2246 /*
2247 * there are still refs with lower seq numbers in the
2248 * process of being added. Don't run this ref yet.
2249 */
2250 list_del_init(&locked_ref->cluster);
2251 mutex_unlock(&locked_ref->mutex);
2252 locked_ref = NULL;
2253 delayed_refs->num_heads_ready++;
2254 spin_unlock(&delayed_refs->lock);
2255 cond_resched();
2256 spin_lock(&delayed_refs->lock);
2257 continue;
2258 }
2259
56bec294
CM
2260 /*
2261 * record the must insert reserved flag before we
2262 * drop the spin lock.
2263 */
2264 must_insert_reserved = locked_ref->must_insert_reserved;
2265 locked_ref->must_insert_reserved = 0;
7bb86316 2266
5d4f98a2
YZ
2267 extent_op = locked_ref->extent_op;
2268 locked_ref->extent_op = NULL;
2269
56bec294
CM
2270 if (!ref) {
2271 /* All delayed refs have been processed, Go ahead
2272 * and send the head node to run_one_delayed_ref,
2273 * so that any accounting fixes can happen
2274 */
2275 ref = &locked_ref->node;
5d4f98a2
YZ
2276
2277 if (extent_op && must_insert_reserved) {
2278 kfree(extent_op);
2279 extent_op = NULL;
2280 }
2281
2282 if (extent_op) {
2283 spin_unlock(&delayed_refs->lock);
2284
2285 ret = run_delayed_extent_op(trans, root,
2286 ref, extent_op);
2287 BUG_ON(ret);
2288 kfree(extent_op);
2289
203bf287 2290 goto next;
5d4f98a2
YZ
2291 }
2292
c3e69d58 2293 list_del_init(&locked_ref->cluster);
56bec294
CM
2294 locked_ref = NULL;
2295 }
02217ed2 2296
56bec294
CM
2297 ref->in_tree = 0;
2298 rb_erase(&ref->rb_node, &delayed_refs->root);
2299 delayed_refs->num_entries--;
a168650c
JS
2300 /*
2301 * we modified num_entries, but as we're currently running
2302 * delayed refs, skip
2303 * wake_up(&delayed_refs->seq_wait);
2304 * here.
2305 */
56bec294 2306 spin_unlock(&delayed_refs->lock);
925baedd 2307
5d4f98a2 2308 ret = run_one_delayed_ref(trans, root, ref, extent_op,
56bec294
CM
2309 must_insert_reserved);
2310 BUG_ON(ret);
eb099670 2311
5d4f98a2
YZ
2312 btrfs_put_delayed_ref(ref);
2313 kfree(extent_op);
c3e69d58 2314 count++;
203bf287
CM
2315next:
2316 do_chunk_alloc(trans, root->fs_info->extent_root,
2317 2 * 1024 * 1024,
2318 btrfs_get_alloc_profile(root, 0),
2319 CHUNK_ALLOC_NO_FORCE);
c3e69d58
CM
2320 cond_resched();
2321 spin_lock(&delayed_refs->lock);
2322 }
2323 return count;
2324}
2325
a168650c
JS
2326
2327static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2328 unsigned long num_refs)
2329{
2330 struct list_head *first_seq = delayed_refs->seq_head.next;
2331
2332 spin_unlock(&delayed_refs->lock);
2333 pr_debug("waiting for more refs (num %ld, first %p)\n",
2334 num_refs, first_seq);
2335 wait_event(delayed_refs->seq_wait,
2336 num_refs != delayed_refs->num_entries ||
2337 delayed_refs->seq_head.next != first_seq);
2338 pr_debug("done waiting for more refs (num %ld, first %p)\n",
2339 delayed_refs->num_entries, delayed_refs->seq_head.next);
2340 spin_lock(&delayed_refs->lock);
2341}
2342
c3e69d58
CM
2343/*
2344 * this starts processing the delayed reference count updates and
2345 * extent insertions we have queued up so far. count can be
2346 * 0, which means to process everything in the tree at the start
2347 * of the run (but not newly added entries), or it can be some target
2348 * number you'd like to process.
2349 */
2350int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2351 struct btrfs_root *root, unsigned long count)
2352{
2353 struct rb_node *node;
2354 struct btrfs_delayed_ref_root *delayed_refs;
2355 struct btrfs_delayed_ref_node *ref;
2356 struct list_head cluster;
2357 int ret;
a168650c 2358 u64 delayed_start;
c3e69d58
CM
2359 int run_all = count == (unsigned long)-1;
2360 int run_most = 0;
a168650c
JS
2361 unsigned long num_refs = 0;
2362 int consider_waiting;
c3e69d58
CM
2363
2364 if (root == root->fs_info->extent_root)
2365 root = root->fs_info->tree_root;
2366
203bf287
CM
2367 do_chunk_alloc(trans, root->fs_info->extent_root,
2368 2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2369 CHUNK_ALLOC_NO_FORCE);
2370
c3e69d58
CM
2371 delayed_refs = &trans->transaction->delayed_refs;
2372 INIT_LIST_HEAD(&cluster);
2373again:
a168650c 2374 consider_waiting = 0;
c3e69d58
CM
2375 spin_lock(&delayed_refs->lock);
2376 if (count == 0) {
2377 count = delayed_refs->num_entries * 2;
2378 run_most = 1;
2379 }
2380 while (1) {
2381 if (!(run_all || run_most) &&
2382 delayed_refs->num_heads_ready < 64)
2383 break;
eb099670 2384
56bec294 2385 /*
c3e69d58
CM
2386 * go find something we can process in the rbtree. We start at
2387 * the beginning of the tree, and then build a cluster
2388 * of refs to process starting at the first one we are able to
2389 * lock
56bec294 2390 */
a168650c 2391 delayed_start = delayed_refs->run_delayed_start;
c3e69d58
CM
2392 ret = btrfs_find_ref_cluster(trans, &cluster,
2393 delayed_refs->run_delayed_start);
2394 if (ret)
56bec294
CM
2395 break;
2396
a168650c
JS
2397 if (delayed_start >= delayed_refs->run_delayed_start) {
2398 if (consider_waiting == 0) {
2399 /*
2400 * btrfs_find_ref_cluster looped. let's do one
2401 * more cycle. if we don't run any delayed ref
2402 * during that cycle (because we can't because
2403 * all of them are blocked) and if the number of
2404 * refs doesn't change, we avoid busy waiting.
2405 */
2406 consider_waiting = 1;
2407 num_refs = delayed_refs->num_entries;
2408 } else {
2409 wait_for_more_refs(delayed_refs, num_refs);
2410 /*
2411 * after waiting, things have changed. we
2412 * dropped the lock and someone else might have
2413 * run some refs, built new clusters and so on.
2414 * therefore, we restart staleness detection.
2415 */
2416 consider_waiting = 0;
2417 }
2418 }
2419
c3e69d58
CM
2420 ret = run_clustered_refs(trans, root, &cluster);
2421 BUG_ON(ret < 0);
2422
2423 count -= min_t(unsigned long, ret, count);
2424
2425 if (count == 0)
2426 break;
a168650c
JS
2427
2428 if (ret || delayed_refs->run_delayed_start == 0) {
2429 /* refs were run, let's reset staleness detection */
2430 consider_waiting = 0;
2431 }
eb099670 2432 }
c3e69d58 2433
56bec294 2434 if (run_all) {
56bec294 2435 node = rb_first(&delayed_refs->root);
c3e69d58 2436 if (!node)
56bec294 2437 goto out;
c3e69d58 2438 count = (unsigned long)-1;
e9d0b13b 2439
56bec294
CM
2440 while (node) {
2441 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2442 rb_node);
2443 if (btrfs_delayed_ref_is_head(ref)) {
2444 struct btrfs_delayed_ref_head *head;
5caf2a00 2445
56bec294
CM
2446 head = btrfs_delayed_node_to_head(ref);
2447 atomic_inc(&ref->refs);
2448
2449 spin_unlock(&delayed_refs->lock);
8cc33e5c
DS
2450 /*
2451 * Mutex was contended, block until it's
2452 * released and try again
2453 */
56bec294
CM
2454 mutex_lock(&head->mutex);
2455 mutex_unlock(&head->mutex);
2456
2457 btrfs_put_delayed_ref(ref);
1887be66 2458 cond_resched();
56bec294
CM
2459 goto again;
2460 }
2461 node = rb_next(node);
2462 }
2463 spin_unlock(&delayed_refs->lock);
56bec294
CM
2464 schedule_timeout(1);
2465 goto again;
5f39d397 2466 }
54aa1f4d 2467out:
c3e69d58 2468 spin_unlock(&delayed_refs->lock);
a28ec197
CM
2469 return 0;
2470}
2471
5d4f98a2
YZ
2472int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root,
2474 u64 bytenr, u64 num_bytes, u64 flags,
2475 int is_data)
2476{
2477 struct btrfs_delayed_extent_op *extent_op;
2478 int ret;
2479
2480 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2481 if (!extent_op)
2482 return -ENOMEM;
2483
2484 extent_op->flags_to_set = flags;
2485 extent_op->update_flags = 1;
2486 extent_op->update_key = 0;
2487 extent_op->is_data = is_data ? 1 : 0;
2488
66d7e7f0
AJ
2489 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2490 num_bytes, extent_op);
5d4f98a2
YZ
2491 if (ret)
2492 kfree(extent_op);
2493 return ret;
2494}
2495
2496static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2497 struct btrfs_root *root,
2498 struct btrfs_path *path,
2499 u64 objectid, u64 offset, u64 bytenr)
2500{
2501 struct btrfs_delayed_ref_head *head;
2502 struct btrfs_delayed_ref_node *ref;
2503 struct btrfs_delayed_data_ref *data_ref;
2504 struct btrfs_delayed_ref_root *delayed_refs;
2505 struct rb_node *node;
2506 int ret = 0;
2507
2508 ret = -ENOENT;
2509 delayed_refs = &trans->transaction->delayed_refs;
2510 spin_lock(&delayed_refs->lock);
2511 head = btrfs_find_delayed_ref_head(trans, bytenr);
2512 if (!head)
2513 goto out;
2514
2515 if (!mutex_trylock(&head->mutex)) {
2516 atomic_inc(&head->node.refs);
2517 spin_unlock(&delayed_refs->lock);
2518
b3b4aa74 2519 btrfs_release_path(path);
5d4f98a2 2520
8cc33e5c
DS
2521 /*
2522 * Mutex was contended, block until it's released and let
2523 * caller try again
2524 */
5d4f98a2
YZ
2525 mutex_lock(&head->mutex);
2526 mutex_unlock(&head->mutex);
2527 btrfs_put_delayed_ref(&head->node);
2528 return -EAGAIN;
2529 }
2530
2531 node = rb_prev(&head->node.rb_node);
2532 if (!node)
2533 goto out_unlock;
2534
2535 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2536
2537 if (ref->bytenr != bytenr)
2538 goto out_unlock;
2539
2540 ret = 1;
2541 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2542 goto out_unlock;
2543
2544 data_ref = btrfs_delayed_node_to_data_ref(ref);
2545
2546 node = rb_prev(node);
2547 if (node) {
2548 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2549 if (ref->bytenr == bytenr)
2550 goto out_unlock;
2551 }
2552
2553 if (data_ref->root != root->root_key.objectid ||
2554 data_ref->objectid != objectid || data_ref->offset != offset)
2555 goto out_unlock;
2556
2557 ret = 0;
2558out_unlock:
2559 mutex_unlock(&head->mutex);
2560out:
2561 spin_unlock(&delayed_refs->lock);
2562 return ret;
2563}
2564
2565static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2566 struct btrfs_root *root,
2567 struct btrfs_path *path,
2568 u64 objectid, u64 offset, u64 bytenr)
be20aa9d
CM
2569{
2570 struct btrfs_root *extent_root = root->fs_info->extent_root;
f321e491 2571 struct extent_buffer *leaf;
5d4f98a2
YZ
2572 struct btrfs_extent_data_ref *ref;
2573 struct btrfs_extent_inline_ref *iref;
2574 struct btrfs_extent_item *ei;
f321e491 2575 struct btrfs_key key;
5d4f98a2 2576 u32 item_size;
be20aa9d 2577 int ret;
925baedd 2578
be20aa9d 2579 key.objectid = bytenr;
31840ae1 2580 key.offset = (u64)-1;
f321e491 2581 key.type = BTRFS_EXTENT_ITEM_KEY;
be20aa9d 2582
be20aa9d
CM
2583 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2584 if (ret < 0)
2585 goto out;
2586 BUG_ON(ret == 0);
80ff3856
YZ
2587
2588 ret = -ENOENT;
2589 if (path->slots[0] == 0)
31840ae1 2590 goto out;
be20aa9d 2591
31840ae1 2592 path->slots[0]--;
f321e491 2593 leaf = path->nodes[0];
5d4f98a2 2594 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
be20aa9d 2595
5d4f98a2 2596 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
be20aa9d 2597 goto out;
f321e491 2598
5d4f98a2
YZ
2599 ret = 1;
2600 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2601#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2602 if (item_size < sizeof(*ei)) {
2603 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2604 goto out;
2605 }
2606#endif
2607 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
bd09835d 2608
5d4f98a2
YZ
2609 if (item_size != sizeof(*ei) +
2610 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2611 goto out;
be20aa9d 2612
5d4f98a2
YZ
2613 if (btrfs_extent_generation(leaf, ei) <=
2614 btrfs_root_last_snapshot(&root->root_item))
2615 goto out;
2616
2617 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2618 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2619 BTRFS_EXTENT_DATA_REF_KEY)
2620 goto out;
2621
2622 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2623 if (btrfs_extent_refs(leaf, ei) !=
2624 btrfs_extent_data_ref_count(leaf, ref) ||
2625 btrfs_extent_data_ref_root(leaf, ref) !=
2626 root->root_key.objectid ||
2627 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2628 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2629 goto out;
2630
2631 ret = 0;
2632out:
2633 return ret;
2634}
2635
2636int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2637 struct btrfs_root *root,
2638 u64 objectid, u64 offset, u64 bytenr)
2639{
2640 struct btrfs_path *path;
2641 int ret;
2642 int ret2;
2643
2644 path = btrfs_alloc_path();
2645 if (!path)
2646 return -ENOENT;
2647
2648 do {
2649 ret = check_committed_ref(trans, root, path, objectid,
2650 offset, bytenr);
2651 if (ret && ret != -ENOENT)
f321e491 2652 goto out;
80ff3856 2653
5d4f98a2
YZ
2654 ret2 = check_delayed_ref(trans, root, path, objectid,
2655 offset, bytenr);
2656 } while (ret2 == -EAGAIN);
2657
2658 if (ret2 && ret2 != -ENOENT) {
2659 ret = ret2;
2660 goto out;
f321e491 2661 }
5d4f98a2
YZ
2662
2663 if (ret != -ENOENT || ret2 != -ENOENT)
2664 ret = 0;
be20aa9d 2665out:
80ff3856 2666 btrfs_free_path(path);
f0486c68
YZ
2667 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2668 WARN_ON(ret > 0);
f321e491 2669 return ret;
be20aa9d 2670}
c5739bba 2671
5d4f98a2 2672static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
b7a9f29f 2673 struct btrfs_root *root,
5d4f98a2 2674 struct extent_buffer *buf,
66d7e7f0 2675 int full_backref, int inc, int for_cow)
31840ae1
ZY
2676{
2677 u64 bytenr;
5d4f98a2
YZ
2678 u64 num_bytes;
2679 u64 parent;
31840ae1 2680 u64 ref_root;
31840ae1 2681 u32 nritems;
31840ae1
ZY
2682 struct btrfs_key key;
2683 struct btrfs_file_extent_item *fi;
2684 int i;
2685 int level;
2686 int ret = 0;
31840ae1 2687 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
66d7e7f0 2688 u64, u64, u64, u64, u64, u64, int);
31840ae1
ZY
2689
2690 ref_root = btrfs_header_owner(buf);
31840ae1
ZY
2691 nritems = btrfs_header_nritems(buf);
2692 level = btrfs_header_level(buf);
2693
5d4f98a2
YZ
2694 if (!root->ref_cows && level == 0)
2695 return 0;
31840ae1 2696
5d4f98a2
YZ
2697 if (inc)
2698 process_func = btrfs_inc_extent_ref;
2699 else
2700 process_func = btrfs_free_extent;
31840ae1 2701
5d4f98a2
YZ
2702 if (full_backref)
2703 parent = buf->start;
2704 else
2705 parent = 0;
2706
2707 for (i = 0; i < nritems; i++) {
31840ae1 2708 if (level == 0) {
5d4f98a2 2709 btrfs_item_key_to_cpu(buf, &key, i);
31840ae1
ZY
2710 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2711 continue;
5d4f98a2 2712 fi = btrfs_item_ptr(buf, i,
31840ae1
ZY
2713 struct btrfs_file_extent_item);
2714 if (btrfs_file_extent_type(buf, fi) ==
2715 BTRFS_FILE_EXTENT_INLINE)
2716 continue;
2717 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2718 if (bytenr == 0)
2719 continue;
5d4f98a2
YZ
2720
2721 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2722 key.offset -= btrfs_file_extent_offset(buf, fi);
2723 ret = process_func(trans, root, bytenr, num_bytes,
2724 parent, ref_root, key.objectid,
66d7e7f0 2725 key.offset, for_cow);
31840ae1
ZY
2726 if (ret)
2727 goto fail;
2728 } else {
5d4f98a2
YZ
2729 bytenr = btrfs_node_blockptr(buf, i);
2730 num_bytes = btrfs_level_size(root, level - 1);
2731 ret = process_func(trans, root, bytenr, num_bytes,
66d7e7f0
AJ
2732 parent, ref_root, level - 1, 0,
2733 for_cow);
31840ae1
ZY
2734 if (ret)
2735 goto fail;
2736 }
2737 }
2738 return 0;
2739fail:
5d4f98a2
YZ
2740 BUG();
2741 return ret;
2742}
2743
2744int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
66d7e7f0 2745 struct extent_buffer *buf, int full_backref, int for_cow)
5d4f98a2 2746{
66d7e7f0 2747 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
5d4f98a2
YZ
2748}
2749
2750int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
66d7e7f0 2751 struct extent_buffer *buf, int full_backref, int for_cow)
5d4f98a2 2752{
66d7e7f0 2753 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
31840ae1
ZY
2754}
2755
9078a3e1
CM
2756static int write_one_cache_group(struct btrfs_trans_handle *trans,
2757 struct btrfs_root *root,
2758 struct btrfs_path *path,
2759 struct btrfs_block_group_cache *cache)
2760{
2761 int ret;
9078a3e1 2762 struct btrfs_root *extent_root = root->fs_info->extent_root;
5f39d397
CM
2763 unsigned long bi;
2764 struct extent_buffer *leaf;
9078a3e1 2765
9078a3e1 2766 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
54aa1f4d
CM
2767 if (ret < 0)
2768 goto fail;
9078a3e1 2769 BUG_ON(ret);
5f39d397
CM
2770
2771 leaf = path->nodes[0];
2772 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2773 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2774 btrfs_mark_buffer_dirty(leaf);
b3b4aa74 2775 btrfs_release_path(path);
54aa1f4d 2776fail:
9078a3e1
CM
2777 if (ret)
2778 return ret;
9078a3e1
CM
2779 return 0;
2780
2781}
2782
4a8c9a62
YZ
2783static struct btrfs_block_group_cache *
2784next_block_group(struct btrfs_root *root,
2785 struct btrfs_block_group_cache *cache)
2786{
2787 struct rb_node *node;
2788 spin_lock(&root->fs_info->block_group_cache_lock);
2789 node = rb_next(&cache->cache_node);
2790 btrfs_put_block_group(cache);
2791 if (node) {
2792 cache = rb_entry(node, struct btrfs_block_group_cache,
2793 cache_node);
11dfe35a 2794 btrfs_get_block_group(cache);
4a8c9a62
YZ
2795 } else
2796 cache = NULL;
2797 spin_unlock(&root->fs_info->block_group_cache_lock);
2798 return cache;
2799}
2800
0af3d00b
JB
2801static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2802 struct btrfs_trans_handle *trans,
2803 struct btrfs_path *path)
2804{
2805 struct btrfs_root *root = block_group->fs_info->tree_root;
2806 struct inode *inode = NULL;
2807 u64 alloc_hint = 0;
2b20982e 2808 int dcs = BTRFS_DC_ERROR;
0af3d00b
JB
2809 int num_pages = 0;
2810 int retries = 0;
2811 int ret = 0;
2812
2813 /*
2814 * If this block group is smaller than 100 megs don't bother caching the
2815 * block group.
2816 */
2817 if (block_group->key.offset < (100 * 1024 * 1024)) {
2818 spin_lock(&block_group->lock);
2819 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2820 spin_unlock(&block_group->lock);
2821 return 0;
2822 }
2823
2824again:
2825 inode = lookup_free_space_inode(root, block_group, path);
2826 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2827 ret = PTR_ERR(inode);
b3b4aa74 2828 btrfs_release_path(path);
0af3d00b
JB
2829 goto out;
2830 }
2831
2832 if (IS_ERR(inode)) {
2833 BUG_ON(retries);
2834 retries++;
2835
2836 if (block_group->ro)
2837 goto out_free;
2838
2839 ret = create_free_space_inode(root, trans, block_group, path);
2840 if (ret)
2841 goto out_free;
2842 goto again;
2843 }
2844
5b0e95bf
JB
2845 /* We've already setup this transaction, go ahead and exit */
2846 if (block_group->cache_generation == trans->transid &&
2847 i_size_read(inode)) {
2848 dcs = BTRFS_DC_SETUP;
2849 goto out_put;
2850 }
2851
0af3d00b
JB
2852 /*
2853 * We want to set the generation to 0, that way if anything goes wrong
2854 * from here on out we know not to trust this cache when we load up next
2855 * time.
2856 */
2857 BTRFS_I(inode)->generation = 0;
2858 ret = btrfs_update_inode(trans, root, inode);
2859 WARN_ON(ret);
2860
2861 if (i_size_read(inode) > 0) {
2862 ret = btrfs_truncate_free_space_cache(root, trans, path,
2863 inode);
2864 if (ret)
2865 goto out_put;
2866 }
2867
2868 spin_lock(&block_group->lock);
2869 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2b20982e
JB
2870 /* We're not cached, don't bother trying to write stuff out */
2871 dcs = BTRFS_DC_WRITTEN;
0af3d00b
JB
2872 spin_unlock(&block_group->lock);
2873 goto out_put;
2874 }
2875 spin_unlock(&block_group->lock);
2876
2877 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2878 if (!num_pages)
2879 num_pages = 1;
2880
2881 /*
2882 * Just to make absolutely sure we have enough space, we're going to
2883 * preallocate 12 pages worth of space for each block group. In
2884 * practice we ought to use at most 8, but we need extra space so we can
2885 * add our header and have a terminator between the extents and the
2886 * bitmaps.
2887 */
2888 num_pages *= 16;
2889 num_pages *= PAGE_CACHE_SIZE;
2890
2891 ret = btrfs_check_data_free_space(inode, num_pages);
2892 if (ret)
2893 goto out_put;
2894
2895 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2896 num_pages, num_pages,
2897 &alloc_hint);
2b20982e
JB
2898 if (!ret)
2899 dcs = BTRFS_DC_SETUP;
0af3d00b 2900 btrfs_free_reserved_data_space(inode, num_pages);
c09544e0 2901
0af3d00b
JB
2902out_put:
2903 iput(inode);
2904out_free:
b3b4aa74 2905 btrfs_release_path(path);
0af3d00b
JB
2906out:
2907 spin_lock(&block_group->lock);
e65cbb94 2908 if (!ret && dcs == BTRFS_DC_SETUP)
5b0e95bf 2909 block_group->cache_generation = trans->transid;
2b20982e 2910 block_group->disk_cache_state = dcs;
0af3d00b
JB
2911 spin_unlock(&block_group->lock);
2912
2913 return ret;
2914}
2915
96b5179d
CM
2916int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2917 struct btrfs_root *root)
9078a3e1 2918{
4a8c9a62 2919 struct btrfs_block_group_cache *cache;
9078a3e1 2920 int err = 0;
9078a3e1 2921 struct btrfs_path *path;
96b5179d 2922 u64 last = 0;
9078a3e1
CM
2923
2924 path = btrfs_alloc_path();
2925 if (!path)
2926 return -ENOMEM;
2927
0af3d00b
JB
2928again:
2929 while (1) {
2930 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2931 while (cache) {
2932 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2933 break;
2934 cache = next_block_group(root, cache);
2935 }
2936 if (!cache) {
2937 if (last == 0)
2938 break;
2939 last = 0;
2940 continue;
2941 }
2942 err = cache_save_setup(cache, trans, path);
2943 last = cache->key.objectid + cache->key.offset;
2944 btrfs_put_block_group(cache);
2945 }
2946
d397712b 2947 while (1) {
4a8c9a62
YZ
2948 if (last == 0) {
2949 err = btrfs_run_delayed_refs(trans, root,
2950 (unsigned long)-1);
2951 BUG_ON(err);
0f9dd46c 2952 }
54aa1f4d 2953
4a8c9a62
YZ
2954 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2955 while (cache) {
0af3d00b
JB
2956 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2957 btrfs_put_block_group(cache);
2958 goto again;
2959 }
2960
4a8c9a62
YZ
2961 if (cache->dirty)
2962 break;
2963 cache = next_block_group(root, cache);
2964 }
2965 if (!cache) {
2966 if (last == 0)
2967 break;
2968 last = 0;
2969 continue;
2970 }
0f9dd46c 2971
0cb59c99
JB
2972 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2973 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
e8569813 2974 cache->dirty = 0;
4a8c9a62 2975 last = cache->key.objectid + cache->key.offset;
0f9dd46c 2976
4a8c9a62
YZ
2977 err = write_one_cache_group(trans, root, path, cache);
2978 BUG_ON(err);
2979 btrfs_put_block_group(cache);
9078a3e1 2980 }
4a8c9a62 2981
0cb59c99
JB
2982 while (1) {
2983 /*
2984 * I don't think this is needed since we're just marking our
2985 * preallocated extent as written, but just in case it can't
2986 * hurt.
2987 */
2988 if (last == 0) {
2989 err = btrfs_run_delayed_refs(trans, root,
2990 (unsigned long)-1);
2991 BUG_ON(err);
2992 }
2993
2994 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2995 while (cache) {
2996 /*
2997 * Really this shouldn't happen, but it could if we
2998 * couldn't write the entire preallocated extent and
2999 * splitting the extent resulted in a new block.
3000 */
3001 if (cache->dirty) {
3002 btrfs_put_block_group(cache);
3003 goto again;
3004 }
3005 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3006 break;
3007 cache = next_block_group(root, cache);
3008 }
3009 if (!cache) {
3010 if (last == 0)
3011 break;
3012 last = 0;
3013 continue;
3014 }
3015
3016 btrfs_write_out_cache(root, trans, cache, path);
3017
3018 /*
3019 * If we didn't have an error then the cache state is still
3020 * NEED_WRITE, so we can set it to WRITTEN.
3021 */
3022 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3023 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3024 last = cache->key.objectid + cache->key.offset;
3025 btrfs_put_block_group(cache);
3026 }
3027
9078a3e1 3028 btrfs_free_path(path);
4a8c9a62 3029 return 0;
9078a3e1
CM
3030}
3031
d2fb3437
YZ
3032int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3033{
3034 struct btrfs_block_group_cache *block_group;
3035 int readonly = 0;
3036
3037 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3038 if (!block_group || block_group->ro)
3039 readonly = 1;
3040 if (block_group)
fa9c0d79 3041 btrfs_put_block_group(block_group);
d2fb3437
YZ
3042 return readonly;
3043}
3044
593060d7
CM
3045static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3046 u64 total_bytes, u64 bytes_used,
3047 struct btrfs_space_info **space_info)
3048{
3049 struct btrfs_space_info *found;
b742bb82
YZ
3050 int i;
3051 int factor;
3052
3053 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3054 BTRFS_BLOCK_GROUP_RAID10))
3055 factor = 2;
3056 else
3057 factor = 1;
593060d7
CM
3058
3059 found = __find_space_info(info, flags);
3060 if (found) {
25179201 3061 spin_lock(&found->lock);
593060d7 3062 found->total_bytes += total_bytes;
89a55897 3063 found->disk_total += total_bytes * factor;
593060d7 3064 found->bytes_used += bytes_used;
b742bb82 3065 found->disk_used += bytes_used * factor;
8f18cf13 3066 found->full = 0;
25179201 3067 spin_unlock(&found->lock);
593060d7
CM
3068 *space_info = found;
3069 return 0;
3070 }
c146afad 3071 found = kzalloc(sizeof(*found), GFP_NOFS);
593060d7
CM
3072 if (!found)
3073 return -ENOMEM;
3074
b742bb82
YZ
3075 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3076 INIT_LIST_HEAD(&found->block_groups[i]);
80eb234a 3077 init_rwsem(&found->groups_sem);
0f9dd46c 3078 spin_lock_init(&found->lock);
52ba6929 3079 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
593060d7 3080 found->total_bytes = total_bytes;
89a55897 3081 found->disk_total = total_bytes * factor;
593060d7 3082 found->bytes_used = bytes_used;
b742bb82 3083 found->disk_used = bytes_used * factor;
593060d7 3084 found->bytes_pinned = 0;
e8569813 3085 found->bytes_reserved = 0;
c146afad 3086 found->bytes_readonly = 0;
f0486c68 3087 found->bytes_may_use = 0;
593060d7 3088 found->full = 0;
0e4f8f88 3089 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
6d74119f 3090 found->chunk_alloc = 0;
fdb5effd
JB
3091 found->flush = 0;
3092 init_waitqueue_head(&found->wait);
593060d7 3093 *space_info = found;
4184ea7f 3094 list_add_rcu(&found->list, &info->space_info);
593060d7
CM
3095 return 0;
3096}
3097
8790d502
CM
3098static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3099{
52ba6929 3100 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
a46d11a8
ID
3101
3102 /* chunk -> extended profile */
3103 if (extra_flags == 0)
3104 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3105
3106 if (flags & BTRFS_BLOCK_GROUP_DATA)
3107 fs_info->avail_data_alloc_bits |= extra_flags;
3108 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3109 fs_info->avail_metadata_alloc_bits |= extra_flags;
3110 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3111 fs_info->avail_system_alloc_bits |= extra_flags;
8790d502 3112}
593060d7 3113
a46d11a8
ID
3114/*
3115 * @flags: available profiles in extended format (see ctree.h)
3116 *
e4d8ec0f
ID
3117 * Returns reduced profile in chunk format. If profile changing is in
3118 * progress (either running or paused) picks the target profile (if it's
3119 * already available), otherwise falls back to plain reducing.
a46d11a8 3120 */
2b82032c 3121u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
ec44a35c 3122{
cd02dca5
CM
3123 /*
3124 * we add in the count of missing devices because we want
3125 * to make sure that any RAID levels on a degraded FS
3126 * continue to be honored.
3127 */
3128 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3129 root->fs_info->fs_devices->missing_devices;
a061fc8d 3130
e4d8ec0f
ID
3131 /* pick restriper's target profile if it's available */
3132 spin_lock(&root->fs_info->balance_lock);
3133 if (root->fs_info->balance_ctl) {
3134 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3135 u64 tgt = 0;
3136
3137 if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3138 (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3139 (flags & bctl->data.target)) {
3140 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3141 } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3142 (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3143 (flags & bctl->sys.target)) {
3144 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3145 } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3146 (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147 (flags & bctl->meta.target)) {
3148 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3149 }
3150
3151 if (tgt) {
3152 spin_unlock(&root->fs_info->balance_lock);
3153 flags = tgt;
3154 goto out;
3155 }
3156 }
3157 spin_unlock(&root->fs_info->balance_lock);
3158
a061fc8d
CM
3159 if (num_devices == 1)
3160 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3161 if (num_devices < 4)
3162 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3163
ec44a35c
CM
3164 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3165 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
a061fc8d 3166 BTRFS_BLOCK_GROUP_RAID10))) {
ec44a35c 3167 flags &= ~BTRFS_BLOCK_GROUP_DUP;
a061fc8d 3168 }
ec44a35c
CM
3169
3170 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
a061fc8d 3171 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
ec44a35c 3172 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
a061fc8d 3173 }
ec44a35c
CM
3174
3175 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3176 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3177 (flags & BTRFS_BLOCK_GROUP_RAID10) |
a46d11a8 3178 (flags & BTRFS_BLOCK_GROUP_DUP))) {
ec44a35c 3179 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
a46d11a8
ID
3180 }
3181
e4d8ec0f 3182out:
a46d11a8
ID
3183 /* extended -> chunk profile */
3184 flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
ec44a35c
CM
3185 return flags;
3186}
3187
b742bb82 3188static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
6a63209f 3189{
b742bb82 3190 if (flags & BTRFS_BLOCK_GROUP_DATA)
6fef8df1 3191 flags |= root->fs_info->avail_data_alloc_bits;
b742bb82 3192 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
6fef8df1 3193 flags |= root->fs_info->avail_system_alloc_bits;
b742bb82 3194 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
6fef8df1
ID
3195 flags |= root->fs_info->avail_metadata_alloc_bits;
3196
b742bb82 3197 return btrfs_reduce_alloc_profile(root, flags);
6a63209f
JB
3198}
3199
6d07bcec 3200u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
9ed74f2d 3201{
b742bb82 3202 u64 flags;
9ed74f2d 3203
b742bb82
YZ
3204 if (data)
3205 flags = BTRFS_BLOCK_GROUP_DATA;
3206 else if (root == root->fs_info->chunk_root)
3207 flags = BTRFS_BLOCK_GROUP_SYSTEM;
9ed74f2d 3208 else
b742bb82 3209 flags = BTRFS_BLOCK_GROUP_METADATA;
9ed74f2d 3210
b742bb82 3211 return get_alloc_profile(root, flags);
6a63209f 3212}
9ed74f2d 3213
6a63209f
JB
3214void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3215{
6a63209f 3216 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
f0486c68 3217 BTRFS_BLOCK_GROUP_DATA);
9ed74f2d
JB
3218}
3219
6a63209f 3220/*
6a63209f
JB
3221 * This will check the space that the inode allocates from to make sure we have
3222 * enough space for bytes.
6a63209f 3223 */
0ca1f7ce 3224int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
6a63209f 3225{
6a63209f 3226 struct btrfs_space_info *data_sinfo;
0ca1f7ce 3227 struct btrfs_root *root = BTRFS_I(inode)->root;
ab6e2410 3228 u64 used;
0af3d00b 3229 int ret = 0, committed = 0, alloc_chunk = 1;
6a63209f 3230
6a63209f
JB
3231 /* make sure bytes are sectorsize aligned */
3232 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
6a63209f 3233
82d5902d
LZ
3234 if (root == root->fs_info->tree_root ||
3235 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
0af3d00b
JB
3236 alloc_chunk = 0;
3237 committed = 1;
3238 }
3239
6a63209f 3240 data_sinfo = BTRFS_I(inode)->space_info;
33b4d47f
CM
3241 if (!data_sinfo)
3242 goto alloc;
9ed74f2d 3243
6a63209f
JB
3244again:
3245 /* make sure we have enough space to handle the data first */
3246 spin_lock(&data_sinfo->lock);
8929ecfa
YZ
3247 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3248 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3249 data_sinfo->bytes_may_use;
ab6e2410
JB
3250
3251 if (used + bytes > data_sinfo->total_bytes) {
4e06bdd6 3252 struct btrfs_trans_handle *trans;
9ed74f2d 3253
6a63209f
JB
3254 /*
3255 * if we don't have enough free bytes in this space then we need
3256 * to alloc a new chunk.
3257 */
0af3d00b 3258 if (!data_sinfo->full && alloc_chunk) {
6a63209f 3259 u64 alloc_target;
9ed74f2d 3260
0e4f8f88 3261 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
6a63209f 3262 spin_unlock(&data_sinfo->lock);
33b4d47f 3263alloc:
6a63209f 3264 alloc_target = btrfs_get_alloc_profile(root, 1);
7a7eaa40 3265 trans = btrfs_join_transaction(root);
a22285a6
YZ
3266 if (IS_ERR(trans))
3267 return PTR_ERR(trans);
9ed74f2d 3268
6a63209f
JB
3269 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3270 bytes + 2 * 1024 * 1024,
0e4f8f88
CM
3271 alloc_target,
3272 CHUNK_ALLOC_NO_FORCE);
6a63209f 3273 btrfs_end_transaction(trans, root);
d52a5b5f
MX
3274 if (ret < 0) {
3275 if (ret != -ENOSPC)
3276 return ret;
3277 else
3278 goto commit_trans;
3279 }
9ed74f2d 3280
33b4d47f
CM
3281 if (!data_sinfo) {
3282 btrfs_set_inode_space_info(root, inode);
3283 data_sinfo = BTRFS_I(inode)->space_info;
3284 }
6a63209f
JB
3285 goto again;
3286 }
f2bb8f5c
JB
3287
3288 /*
3289 * If we have less pinned bytes than we want to allocate then
3290 * don't bother committing the transaction, it won't help us.
3291 */
3292 if (data_sinfo->bytes_pinned < bytes)
3293 committed = 1;
6a63209f 3294 spin_unlock(&data_sinfo->lock);
6a63209f 3295
4e06bdd6 3296 /* commit the current transaction and try again */
d52a5b5f 3297commit_trans:
a4abeea4
JB
3298 if (!committed &&
3299 !atomic_read(&root->fs_info->open_ioctl_trans)) {
4e06bdd6 3300 committed = 1;
7a7eaa40 3301 trans = btrfs_join_transaction(root);
a22285a6
YZ
3302 if (IS_ERR(trans))
3303 return PTR_ERR(trans);
4e06bdd6
JB
3304 ret = btrfs_commit_transaction(trans, root);
3305 if (ret)
3306 return ret;
3307 goto again;
3308 }
9ed74f2d 3309
6a63209f
JB
3310 return -ENOSPC;
3311 }
3312 data_sinfo->bytes_may_use += bytes;
6a63209f 3313 spin_unlock(&data_sinfo->lock);
6a63209f 3314
9ed74f2d 3315 return 0;
9ed74f2d 3316}
6a63209f 3317
6a63209f 3318/*
fb25e914 3319 * Called if we need to clear a data reservation for this inode.
6a63209f 3320 */
0ca1f7ce 3321void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
e3ccfa98 3322{
0ca1f7ce 3323 struct btrfs_root *root = BTRFS_I(inode)->root;
6a63209f 3324 struct btrfs_space_info *data_sinfo;
e3ccfa98 3325
6a63209f
JB
3326 /* make sure bytes are sectorsize aligned */
3327 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
e3ccfa98 3328
6a63209f
JB
3329 data_sinfo = BTRFS_I(inode)->space_info;
3330 spin_lock(&data_sinfo->lock);
3331 data_sinfo->bytes_may_use -= bytes;
6a63209f 3332 spin_unlock(&data_sinfo->lock);
e3ccfa98
JB
3333}
3334
97e728d4 3335static void force_metadata_allocation(struct btrfs_fs_info *info)
e3ccfa98 3336{
97e728d4
JB
3337 struct list_head *head = &info->space_info;
3338 struct btrfs_space_info *found;
e3ccfa98 3339
97e728d4
JB
3340 rcu_read_lock();
3341 list_for_each_entry_rcu(found, head, list) {
3342 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
0e4f8f88 3343 found->force_alloc = CHUNK_ALLOC_FORCE;
e3ccfa98 3344 }
97e728d4 3345 rcu_read_unlock();
e3ccfa98
JB
3346}
3347
e5bc2458 3348static int should_alloc_chunk(struct btrfs_root *root,
0e4f8f88
CM
3349 struct btrfs_space_info *sinfo, u64 alloc_bytes,
3350 int force)
32c00aff 3351{
fb25e914 3352 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
424499db 3353 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
0e4f8f88 3354 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
e5bc2458 3355 u64 thresh;
e3ccfa98 3356
0e4f8f88
CM
3357 if (force == CHUNK_ALLOC_FORCE)
3358 return 1;
3359
fb25e914
JB
3360 /*
3361 * We need to take into account the global rsv because for all intents
3362 * and purposes it's used space. Don't worry about locking the
3363 * global_rsv, it doesn't change except when the transaction commits.
3364 */
3365 num_allocated += global_rsv->size;
3366
0e4f8f88
CM
3367 /*
3368 * in limited mode, we want to have some free space up to
3369 * about 1% of the FS size.
3370 */
3371 if (force == CHUNK_ALLOC_LIMITED) {
6c41761f 3372 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
0e4f8f88
CM
3373 thresh = max_t(u64, 64 * 1024 * 1024,
3374 div_factor_fine(thresh, 1));
3375
3376 if (num_bytes - num_allocated < thresh)
3377 return 1;
3378 }
6c41761f 3379 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
0e4f8f88 3380
cf1d72c9
CM
3381 /* 256MB or 2% of the FS */
3382 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
e5bc2458 3383
cf1d72c9 3384 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
14ed0ca6 3385 return 0;
424499db 3386 return 1;
32c00aff
JB
3387}
3388
6324fbf3
CM
3389static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3390 struct btrfs_root *extent_root, u64 alloc_bytes,
0ef3e66b 3391 u64 flags, int force)
9ed74f2d 3392{
6324fbf3 3393 struct btrfs_space_info *space_info;
97e728d4 3394 struct btrfs_fs_info *fs_info = extent_root->fs_info;
6d74119f 3395 int wait_for_alloc = 0;
9ed74f2d 3396 int ret = 0;
9ed74f2d 3397
70922617 3398 BUG_ON(!profile_is_valid(flags, 0));
ec44a35c 3399
6324fbf3 3400 space_info = __find_space_info(extent_root->fs_info, flags);
593060d7
CM
3401 if (!space_info) {
3402 ret = update_space_info(extent_root->fs_info, flags,
3403 0, 0, &space_info);
3404 BUG_ON(ret);
9ed74f2d 3405 }
6324fbf3 3406 BUG_ON(!space_info);
9ed74f2d 3407
6d74119f 3408again:
25179201 3409 spin_lock(&space_info->lock);
9ed74f2d 3410 if (space_info->force_alloc)
0e4f8f88 3411 force = space_info->force_alloc;
25179201
JB
3412 if (space_info->full) {
3413 spin_unlock(&space_info->lock);
6d74119f 3414 return 0;
9ed74f2d
JB
3415 }
3416
0e4f8f88 3417 if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
25179201 3418 spin_unlock(&space_info->lock);
6d74119f
JB
3419 return 0;
3420 } else if (space_info->chunk_alloc) {
3421 wait_for_alloc = 1;
3422 } else {
3423 space_info->chunk_alloc = 1;
9ed74f2d 3424 }
0e4f8f88 3425
25179201 3426 spin_unlock(&space_info->lock);
9ed74f2d 3427
6d74119f
JB
3428 mutex_lock(&fs_info->chunk_mutex);
3429
3430 /*
3431 * The chunk_mutex is held throughout the entirety of a chunk
3432 * allocation, so once we've acquired the chunk_mutex we know that the
3433 * other guy is done and we need to recheck and see if we should
3434 * allocate.
3435 */
3436 if (wait_for_alloc) {
3437 mutex_unlock(&fs_info->chunk_mutex);
3438 wait_for_alloc = 0;
3439 goto again;
3440 }
3441
67377734
JB
3442 /*
3443 * If we have mixed data/metadata chunks we want to make sure we keep
3444 * allocating mixed chunks instead of individual chunks.
3445 */
3446 if (btrfs_mixed_space_info(space_info))
3447 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3448
97e728d4
JB
3449 /*
3450 * if we're doing a data chunk, go ahead and make sure that
3451 * we keep a reasonable number of metadata chunks allocated in the
3452 * FS as well.
3453 */
9ed74f2d 3454 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
97e728d4
JB
3455 fs_info->data_chunk_allocations++;
3456 if (!(fs_info->data_chunk_allocations %
3457 fs_info->metadata_ratio))
3458 force_metadata_allocation(fs_info);
9ed74f2d
JB
3459 }
3460
2b82032c 3461 ret = btrfs_alloc_chunk(trans, extent_root, flags);
92b8e897
MF
3462 if (ret < 0 && ret != -ENOSPC)
3463 goto out;
3464
9ed74f2d 3465 spin_lock(&space_info->lock);
9ed74f2d 3466 if (ret)
6324fbf3 3467 space_info->full = 1;
424499db
YZ
3468 else
3469 ret = 1;
6d74119f 3470
0e4f8f88 3471 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
6d74119f 3472 space_info->chunk_alloc = 0;
9ed74f2d 3473 spin_unlock(&space_info->lock);
92b8e897 3474out:
c146afad 3475 mutex_unlock(&extent_root->fs_info->chunk_mutex);
0f9dd46c 3476 return ret;
6324fbf3 3477}
9ed74f2d 3478
9ed74f2d 3479/*
5da9d01b 3480 * shrink metadata reservation for delalloc
9ed74f2d 3481 */
663350ac 3482static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
f104d044 3483 bool wait_ordered)
5da9d01b 3484{
0ca1f7ce 3485 struct btrfs_block_rsv *block_rsv;
0019f10d 3486 struct btrfs_space_info *space_info;
663350ac 3487 struct btrfs_trans_handle *trans;
5da9d01b
YZ
3488 u64 reserved;
3489 u64 max_reclaim;
3490 u64 reclaimed = 0;
b1953bce 3491 long time_left;
877da174 3492 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
b1953bce 3493 int loops = 0;
36e39c40 3494 unsigned long progress;
5da9d01b 3495
663350ac 3496 trans = (struct btrfs_trans_handle *)current->journal_info;
0ca1f7ce 3497 block_rsv = &root->fs_info->delalloc_block_rsv;
0019f10d 3498 space_info = block_rsv->space_info;
bf9022e0
CM
3499
3500 smp_mb();
fb25e914 3501 reserved = space_info->bytes_may_use;
36e39c40 3502 progress = space_info->reservation_progress;
5da9d01b
YZ
3503
3504 if (reserved == 0)
3505 return 0;
c4f675cd 3506
fdb5effd
JB
3507 smp_mb();
3508 if (root->fs_info->delalloc_bytes == 0) {
3509 if (trans)
3510 return 0;
3511 btrfs_wait_ordered_extents(root, 0, 0);
3512 return 0;
3513 }
3514
5da9d01b 3515 max_reclaim = min(reserved, to_reclaim);
877da174
JB
3516 nr_pages = max_t(unsigned long, nr_pages,
3517 max_reclaim >> PAGE_CACHE_SHIFT);
b1953bce 3518 while (loops < 1024) {
bf9022e0
CM
3519 /* have the flusher threads jump in and do some IO */
3520 smp_mb();
3521 nr_pages = min_t(unsigned long, nr_pages,
3522 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
0e175a18
CW
3523 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3524 WB_REASON_FS_FREE_SPACE);
5da9d01b 3525
0019f10d 3526 spin_lock(&space_info->lock);
fb25e914
JB
3527 if (reserved > space_info->bytes_may_use)
3528 reclaimed += reserved - space_info->bytes_may_use;
3529 reserved = space_info->bytes_may_use;
0019f10d 3530 spin_unlock(&space_info->lock);
5da9d01b 3531
36e39c40
CM
3532 loops++;
3533
5da9d01b
YZ
3534 if (reserved == 0 || reclaimed >= max_reclaim)
3535 break;
3536
3537 if (trans && trans->transaction->blocked)
3538 return -EAGAIN;
bf9022e0 3539
f104d044
JB
3540 if (wait_ordered && !trans) {
3541 btrfs_wait_ordered_extents(root, 0, 0);
3542 } else {
3543 time_left = schedule_timeout_interruptible(1);
b1953bce 3544
f104d044
JB
3545 /* We were interrupted, exit */
3546 if (time_left)
3547 break;
3548 }
b1953bce 3549
36e39c40
CM
3550 /* we've kicked the IO a few times, if anything has been freed,
3551 * exit. There is no sense in looping here for a long time
3552 * when we really need to commit the transaction, or there are
3553 * just too many writers without enough free space
3554 */
3555
3556 if (loops > 3) {
3557 smp_mb();
3558 if (progress != space_info->reservation_progress)
3559 break;
3560 }
bf9022e0 3561
5da9d01b 3562 }
f104d044 3563
5da9d01b
YZ
3564 return reclaimed >= to_reclaim;
3565}
3566
663350ac
JB
3567/**
3568 * maybe_commit_transaction - possibly commit the transaction if its ok to
3569 * @root - the root we're allocating for
3570 * @bytes - the number of bytes we want to reserve
3571 * @force - force the commit
8bb8ab2e 3572 *
663350ac
JB
3573 * This will check to make sure that committing the transaction will actually
3574 * get us somewhere and then commit the transaction if it does. Otherwise it
3575 * will return -ENOSPC.
8bb8ab2e 3576 */
663350ac
JB
3577static int may_commit_transaction(struct btrfs_root *root,
3578 struct btrfs_space_info *space_info,
3579 u64 bytes, int force)
3580{
3581 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3582 struct btrfs_trans_handle *trans;
3583
3584 trans = (struct btrfs_trans_handle *)current->journal_info;
3585 if (trans)
3586 return -EAGAIN;
3587
3588 if (force)
3589 goto commit;
3590
3591 /* See if there is enough pinned space to make this reservation */
3592 spin_lock(&space_info->lock);
3593 if (space_info->bytes_pinned >= bytes) {
3594 spin_unlock(&space_info->lock);
3595 goto commit;
3596 }
3597 spin_unlock(&space_info->lock);
3598
3599 /*
3600 * See if there is some space in the delayed insertion reservation for
3601 * this reservation.
3602 */
3603 if (space_info != delayed_rsv->space_info)
3604 return -ENOSPC;
3605
3606 spin_lock(&delayed_rsv->lock);
3607 if (delayed_rsv->size < bytes) {
3608 spin_unlock(&delayed_rsv->lock);
3609 return -ENOSPC;
3610 }
3611 spin_unlock(&delayed_rsv->lock);
3612
3613commit:
3614 trans = btrfs_join_transaction(root);
3615 if (IS_ERR(trans))
3616 return -ENOSPC;
3617
3618 return btrfs_commit_transaction(trans, root);
3619}
3620
4a92b1b8
JB
3621/**
3622 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3623 * @root - the root we're allocating for
3624 * @block_rsv - the block_rsv we're allocating for
3625 * @orig_bytes - the number of bytes we want
3626 * @flush - wether or not we can flush to make our reservation
8bb8ab2e 3627 *
4a92b1b8
JB
3628 * This will reserve orgi_bytes number of bytes from the space info associated
3629 * with the block_rsv. If there is not enough space it will make an attempt to
3630 * flush out space to make room. It will do this by flushing delalloc if
3631 * possible or committing the transaction. If flush is 0 then no attempts to
3632 * regain reservations will be made and this will fail if there is not enough
3633 * space already.
8bb8ab2e 3634 */
4a92b1b8 3635static int reserve_metadata_bytes(struct btrfs_root *root,
8bb8ab2e
JB
3636 struct btrfs_block_rsv *block_rsv,
3637 u64 orig_bytes, int flush)
9ed74f2d 3638{
f0486c68 3639 struct btrfs_space_info *space_info = block_rsv->space_info;
2bf64758 3640 u64 used;
8bb8ab2e
JB
3641 u64 num_bytes = orig_bytes;
3642 int retries = 0;
3643 int ret = 0;
38227933 3644 bool committed = false;
fdb5effd 3645 bool flushing = false;
f104d044 3646 bool wait_ordered = false;
9ed74f2d 3647
8bb8ab2e 3648again:
fdb5effd 3649 ret = 0;
8bb8ab2e 3650 spin_lock(&space_info->lock);
fdb5effd
JB
3651 /*
3652 * We only want to wait if somebody other than us is flushing and we are
3653 * actually alloed to flush.
3654 */
3655 while (flush && !flushing && space_info->flush) {
3656 spin_unlock(&space_info->lock);
3657 /*
3658 * If we have a trans handle we can't wait because the flusher
3659 * may have to commit the transaction, which would mean we would
3660 * deadlock since we are waiting for the flusher to finish, but
3661 * hold the current transaction open.
3662 */
663350ac 3663 if (current->journal_info)
fdb5effd
JB
3664 return -EAGAIN;
3665 ret = wait_event_interruptible(space_info->wait,
3666 !space_info->flush);
3667 /* Must have been interrupted, return */
3668 if (ret)
3669 return -EINTR;
3670
3671 spin_lock(&space_info->lock);
3672 }
3673
3674 ret = -ENOSPC;
2bf64758
JB
3675 used = space_info->bytes_used + space_info->bytes_reserved +
3676 space_info->bytes_pinned + space_info->bytes_readonly +
3677 space_info->bytes_may_use;
9ed74f2d 3678
8bb8ab2e
JB
3679 /*
3680 * The idea here is that we've not already over-reserved the block group
3681 * then we can go ahead and save our reservation first and then start
3682 * flushing if we need to. Otherwise if we've already overcommitted
3683 * lets start flushing stuff first and then come back and try to make
3684 * our reservation.
3685 */
2bf64758
JB
3686 if (used <= space_info->total_bytes) {
3687 if (used + orig_bytes <= space_info->total_bytes) {
fb25e914 3688 space_info->bytes_may_use += orig_bytes;
8bb8ab2e
JB
3689 ret = 0;
3690 } else {
3691 /*
3692 * Ok set num_bytes to orig_bytes since we aren't
3693 * overocmmitted, this way we only try and reclaim what
3694 * we need.
3695 */
3696 num_bytes = orig_bytes;
3697 }
3698 } else {
3699 /*
3700 * Ok we're over committed, set num_bytes to the overcommitted
3701 * amount plus the amount of bytes that we need for this
3702 * reservation.
3703 */
f104d044 3704 wait_ordered = true;
2bf64758 3705 num_bytes = used - space_info->total_bytes +
8bb8ab2e
JB
3706 (orig_bytes * (retries + 1));
3707 }
9ed74f2d 3708
36ba022a 3709 if (ret) {
2bf64758
JB
3710 u64 profile = btrfs_get_alloc_profile(root, 0);
3711 u64 avail;
3712
7e355b83
JB
3713 /*
3714 * If we have a lot of space that's pinned, don't bother doing
3715 * the overcommit dance yet and just commit the transaction.
3716 */
3717 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3718 do_div(avail, 10);
663350ac 3719 if (space_info->bytes_pinned >= avail && flush && !committed) {
7e355b83
JB
3720 space_info->flush = 1;
3721 flushing = true;
3722 spin_unlock(&space_info->lock);
663350ac
JB
3723 ret = may_commit_transaction(root, space_info,
3724 orig_bytes, 1);
3725 if (ret)
3726 goto out;
3727 committed = true;
3728 goto again;
7e355b83
JB
3729 }
3730
2bf64758
JB
3731 spin_lock(&root->fs_info->free_chunk_lock);
3732 avail = root->fs_info->free_chunk_space;
3733
3734 /*
3735 * If we have dup, raid1 or raid10 then only half of the free
3736 * space is actually useable.
3737 */
3738 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3739 BTRFS_BLOCK_GROUP_RAID1 |
3740 BTRFS_BLOCK_GROUP_RAID10))
3741 avail >>= 1;
3742
3743 /*
3744 * If we aren't flushing don't let us overcommit too much, say
3745 * 1/8th of the space. If we can flush, let it overcommit up to
3746 * 1/2 of the space.
3747 */
3748 if (flush)
3749 avail >>= 3;
3750 else
3751 avail >>= 1;
3752 spin_unlock(&root->fs_info->free_chunk_lock);
3753
9a82ca65 3754 if (used + num_bytes < space_info->total_bytes + avail) {
2bf64758
JB
3755 space_info->bytes_may_use += orig_bytes;
3756 ret = 0;
f104d044
JB
3757 } else {
3758 wait_ordered = true;
2bf64758
JB
3759 }
3760 }
3761
8bb8ab2e
JB
3762 /*
3763 * Couldn't make our reservation, save our place so while we're trying
3764 * to reclaim space we can actually use it instead of somebody else
3765 * stealing it from us.
3766 */
fdb5effd
JB
3767 if (ret && flush) {
3768 flushing = true;
3769 space_info->flush = 1;
8bb8ab2e 3770 }
9ed74f2d 3771
f0486c68 3772 spin_unlock(&space_info->lock);
9ed74f2d 3773
fdb5effd 3774 if (!ret || !flush)
8bb8ab2e 3775 goto out;
f0486c68 3776
8bb8ab2e
JB
3777 /*
3778 * We do synchronous shrinking since we don't actually unreserve
3779 * metadata until after the IO is completed.
3780 */
663350ac 3781 ret = shrink_delalloc(root, num_bytes, wait_ordered);
fdb5effd 3782 if (ret < 0)
8bb8ab2e 3783 goto out;
f0486c68 3784
75c195a2
CM
3785 ret = 0;
3786
8bb8ab2e
JB
3787 /*
3788 * So if we were overcommitted it's possible that somebody else flushed
3789 * out enough space and we simply didn't have enough space to reclaim,
3790 * so go back around and try again.
3791 */
3792 if (retries < 2) {
f104d044 3793 wait_ordered = true;
8bb8ab2e
JB
3794 retries++;
3795 goto again;
3796 }
f0486c68 3797
8bb8ab2e 3798 ret = -ENOSPC;
75c195a2
CM
3799 if (committed)
3800 goto out;
3801
663350ac 3802 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
38227933 3803 if (!ret) {
38227933 3804 committed = true;
8bb8ab2e 3805 goto again;
38227933 3806 }
8bb8ab2e
JB
3807
3808out:
fdb5effd 3809 if (flushing) {
8bb8ab2e 3810 spin_lock(&space_info->lock);
fdb5effd
JB
3811 space_info->flush = 0;
3812 wake_up_all(&space_info->wait);
8bb8ab2e 3813 spin_unlock(&space_info->lock);
f0486c68 3814 }
f0486c68
YZ
3815 return ret;
3816}
3817
3818static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3819 struct btrfs_root *root)
3820{
4c13d758
JB
3821 struct btrfs_block_rsv *block_rsv = NULL;
3822
3823 if (root->ref_cows || root == root->fs_info->csum_root)
f0486c68 3824 block_rsv = trans->block_rsv;
4c13d758
JB
3825
3826 if (!block_rsv)
f0486c68
YZ
3827 block_rsv = root->block_rsv;
3828
3829 if (!block_rsv)
3830 block_rsv = &root->fs_info->empty_block_rsv;
3831
3832 return block_rsv;
3833}
3834
3835static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3836 u64 num_bytes)
3837{
3838 int ret = -ENOSPC;
3839 spin_lock(&block_rsv->lock);
3840 if (block_rsv->reserved >= num_bytes) {
3841 block_rsv->reserved -= num_bytes;
3842 if (block_rsv->reserved < block_rsv->size)
3843 block_rsv->full = 0;
3844 ret = 0;
3845 }
3846 spin_unlock(&block_rsv->lock);
3847 return ret;
3848}
3849
3850static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3851 u64 num_bytes, int update_size)
3852{
3853 spin_lock(&block_rsv->lock);
3854 block_rsv->reserved += num_bytes;
3855 if (update_size)
3856 block_rsv->size += num_bytes;
3857 else if (block_rsv->reserved >= block_rsv->size)
3858 block_rsv->full = 1;
3859 spin_unlock(&block_rsv->lock);
3860}
3861
62a45b60
DS
3862static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3863 struct btrfs_block_rsv *dest, u64 num_bytes)
f0486c68
YZ
3864{
3865 struct btrfs_space_info *space_info = block_rsv->space_info;
3866
3867 spin_lock(&block_rsv->lock);
3868 if (num_bytes == (u64)-1)
3869 num_bytes = block_rsv->size;
3870 block_rsv->size -= num_bytes;
3871 if (block_rsv->reserved >= block_rsv->size) {
3872 num_bytes = block_rsv->reserved - block_rsv->size;
3873 block_rsv->reserved = block_rsv->size;
3874 block_rsv->full = 1;
3875 } else {
3876 num_bytes = 0;
3877 }
3878 spin_unlock(&block_rsv->lock);
3879
3880 if (num_bytes > 0) {
3881 if (dest) {
e9e22899
JB
3882 spin_lock(&dest->lock);
3883 if (!dest->full) {
3884 u64 bytes_to_add;
3885
3886 bytes_to_add = dest->size - dest->reserved;
3887 bytes_to_add = min(num_bytes, bytes_to_add);
3888 dest->reserved += bytes_to_add;
3889 if (dest->reserved >= dest->size)
3890 dest->full = 1;
3891 num_bytes -= bytes_to_add;
3892 }
3893 spin_unlock(&dest->lock);
3894 }
3895 if (num_bytes) {
f0486c68 3896 spin_lock(&space_info->lock);
fb25e914 3897 space_info->bytes_may_use -= num_bytes;
36e39c40 3898 space_info->reservation_progress++;
f0486c68 3899 spin_unlock(&space_info->lock);
4e06bdd6 3900 }
9ed74f2d 3901 }
f0486c68 3902}
4e06bdd6 3903
f0486c68
YZ
3904static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3905 struct btrfs_block_rsv *dst, u64 num_bytes)
3906{
3907 int ret;
9ed74f2d 3908
f0486c68
YZ
3909 ret = block_rsv_use_bytes(src, num_bytes);
3910 if (ret)
3911 return ret;
9ed74f2d 3912
f0486c68 3913 block_rsv_add_bytes(dst, num_bytes, 1);
9ed74f2d
JB
3914 return 0;
3915}
3916
f0486c68 3917void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
9ed74f2d 3918{
f0486c68
YZ
3919 memset(rsv, 0, sizeof(*rsv));
3920 spin_lock_init(&rsv->lock);
f0486c68
YZ
3921}
3922
3923struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3924{
3925 struct btrfs_block_rsv *block_rsv;
3926 struct btrfs_fs_info *fs_info = root->fs_info;
9ed74f2d 3927
f0486c68
YZ
3928 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3929 if (!block_rsv)
3930 return NULL;
9ed74f2d 3931
f0486c68 3932 btrfs_init_block_rsv(block_rsv);
f0486c68
YZ
3933 block_rsv->space_info = __find_space_info(fs_info,
3934 BTRFS_BLOCK_GROUP_METADATA);
f0486c68
YZ
3935 return block_rsv;
3936}
9ed74f2d 3937
f0486c68
YZ
3938void btrfs_free_block_rsv(struct btrfs_root *root,
3939 struct btrfs_block_rsv *rsv)
3940{
dabdb640
JB
3941 btrfs_block_rsv_release(root, rsv, (u64)-1);
3942 kfree(rsv);
9ed74f2d
JB
3943}
3944
61b520a9
MX
3945static inline int __block_rsv_add(struct btrfs_root *root,
3946 struct btrfs_block_rsv *block_rsv,
3947 u64 num_bytes, int flush)
9ed74f2d 3948{
f0486c68 3949 int ret;
9ed74f2d 3950
f0486c68
YZ
3951 if (num_bytes == 0)
3952 return 0;
8bb8ab2e 3953
61b520a9 3954 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
f0486c68
YZ
3955 if (!ret) {
3956 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3957 return 0;
3958 }
9ed74f2d 3959
f0486c68 3960 return ret;
f0486c68 3961}
9ed74f2d 3962
61b520a9
MX
3963int btrfs_block_rsv_add(struct btrfs_root *root,
3964 struct btrfs_block_rsv *block_rsv,
3965 u64 num_bytes)
3966{
3967 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3968}
3969
c06a0e12
JB
3970int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3971 struct btrfs_block_rsv *block_rsv,
3972 u64 num_bytes)
f0486c68 3973{
61b520a9 3974 return __block_rsv_add(root, block_rsv, num_bytes, 0);
f0486c68 3975}
9ed74f2d 3976
4a92b1b8 3977int btrfs_block_rsv_check(struct btrfs_root *root,
36ba022a 3978 struct btrfs_block_rsv *block_rsv, int min_factor)
f0486c68
YZ
3979{
3980 u64 num_bytes = 0;
f0486c68 3981 int ret = -ENOSPC;
9ed74f2d 3982
f0486c68
YZ
3983 if (!block_rsv)
3984 return 0;
9ed74f2d 3985
f0486c68 3986 spin_lock(&block_rsv->lock);
36ba022a
JB
3987 num_bytes = div_factor(block_rsv->size, min_factor);
3988 if (block_rsv->reserved >= num_bytes)
3989 ret = 0;
3990 spin_unlock(&block_rsv->lock);
9ed74f2d 3991
36ba022a
JB
3992 return ret;
3993}
3994
aa38a711
MX
3995static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
3996 struct btrfs_block_rsv *block_rsv,
3997 u64 min_reserved, int flush)
36ba022a
JB
3998{
3999 u64 num_bytes = 0;
4000 int ret = -ENOSPC;
4001
4002 if (!block_rsv)
4003 return 0;
4004
4005 spin_lock(&block_rsv->lock);
4006 num_bytes = min_reserved;
13553e52 4007 if (block_rsv->reserved >= num_bytes)
f0486c68 4008 ret = 0;
13553e52 4009 else
f0486c68 4010 num_bytes -= block_rsv->reserved;
f0486c68 4011 spin_unlock(&block_rsv->lock);
13553e52 4012
f0486c68
YZ
4013 if (!ret)
4014 return 0;
4015
aa38a711 4016 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
dabdb640
JB
4017 if (!ret) {
4018 block_rsv_add_bytes(block_rsv, num_bytes, 0);
f0486c68 4019 return 0;
6a63209f 4020 }
9ed74f2d 4021
13553e52 4022 return ret;
f0486c68
YZ
4023}
4024
aa38a711
MX
4025int btrfs_block_rsv_refill(struct btrfs_root *root,
4026 struct btrfs_block_rsv *block_rsv,
4027 u64 min_reserved)
4028{
4029 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4030}
4031
4032int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4033 struct btrfs_block_rsv *block_rsv,
4034 u64 min_reserved)
4035{
4036 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4037}
4038
f0486c68
YZ
4039int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4040 struct btrfs_block_rsv *dst_rsv,
4041 u64 num_bytes)
4042{
4043 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4044}
4045
4046void btrfs_block_rsv_release(struct btrfs_root *root,
4047 struct btrfs_block_rsv *block_rsv,
4048 u64 num_bytes)
4049{
4050 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4051 if (global_rsv->full || global_rsv == block_rsv ||
4052 block_rsv->space_info != global_rsv->space_info)
4053 global_rsv = NULL;
4054 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
6a63209f
JB
4055}
4056
4057/*
8929ecfa
YZ
4058 * helper to calculate size of global block reservation.
4059 * the desired value is sum of space used by extent tree,
4060 * checksum tree and root tree
6a63209f 4061 */
8929ecfa 4062static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
6a63209f 4063{
8929ecfa
YZ
4064 struct btrfs_space_info *sinfo;
4065 u64 num_bytes;
4066 u64 meta_used;
4067 u64 data_used;
6c41761f 4068 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
6a63209f 4069
8929ecfa
YZ
4070 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4071 spin_lock(&sinfo->lock);
4072 data_used = sinfo->bytes_used;
4073 spin_unlock(&sinfo->lock);
33b4d47f 4074
8929ecfa
YZ
4075 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4076 spin_lock(&sinfo->lock);
6d48755d
JB
4077 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4078 data_used = 0;
8929ecfa
YZ
4079 meta_used = sinfo->bytes_used;
4080 spin_unlock(&sinfo->lock);
ab6e2410 4081
8929ecfa
YZ
4082 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4083 csum_size * 2;
4084 num_bytes += div64_u64(data_used + meta_used, 50);
4e06bdd6 4085
8929ecfa
YZ
4086 if (num_bytes * 3 > meta_used)
4087 num_bytes = div64_u64(meta_used, 3);
ab6e2410 4088
8929ecfa
YZ
4089 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4090}
6a63209f 4091
8929ecfa
YZ
4092static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4093{
4094 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4095 struct btrfs_space_info *sinfo = block_rsv->space_info;
4096 u64 num_bytes;
6a63209f 4097
8929ecfa 4098 num_bytes = calc_global_metadata_size(fs_info);
33b4d47f 4099
8929ecfa
YZ
4100 spin_lock(&block_rsv->lock);
4101 spin_lock(&sinfo->lock);
4e06bdd6 4102
8929ecfa 4103 block_rsv->size = num_bytes;
4e06bdd6 4104
8929ecfa 4105 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
6d48755d
JB
4106 sinfo->bytes_reserved + sinfo->bytes_readonly +
4107 sinfo->bytes_may_use;
8929ecfa
YZ
4108
4109 if (sinfo->total_bytes > num_bytes) {
4110 num_bytes = sinfo->total_bytes - num_bytes;
4111 block_rsv->reserved += num_bytes;
fb25e914 4112 sinfo->bytes_may_use += num_bytes;
6a63209f 4113 }
6a63209f 4114
8929ecfa
YZ
4115 if (block_rsv->reserved >= block_rsv->size) {
4116 num_bytes = block_rsv->reserved - block_rsv->size;
fb25e914 4117 sinfo->bytes_may_use -= num_bytes;
36e39c40 4118 sinfo->reservation_progress++;
8929ecfa
YZ
4119 block_rsv->reserved = block_rsv->size;
4120 block_rsv->full = 1;
4121 }
182608c8 4122
8929ecfa
YZ
4123 spin_unlock(&sinfo->lock);
4124 spin_unlock(&block_rsv->lock);
6a63209f
JB
4125}
4126
f0486c68 4127static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 4128{
f0486c68 4129 struct btrfs_space_info *space_info;
6a63209f 4130
f0486c68
YZ
4131 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4132 fs_info->chunk_block_rsv.space_info = space_info;
6a63209f 4133
f0486c68 4134 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
8929ecfa 4135 fs_info->global_block_rsv.space_info = space_info;
8929ecfa 4136 fs_info->delalloc_block_rsv.space_info = space_info;
f0486c68
YZ
4137 fs_info->trans_block_rsv.space_info = space_info;
4138 fs_info->empty_block_rsv.space_info = space_info;
6d668dda 4139 fs_info->delayed_block_rsv.space_info = space_info;
f0486c68 4140
8929ecfa
YZ
4141 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4142 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4143 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4144 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
f0486c68 4145 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
8929ecfa 4146
8929ecfa 4147 update_global_block_rsv(fs_info);
6a63209f
JB
4148}
4149
8929ecfa 4150static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 4151{
8929ecfa
YZ
4152 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
4153 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4154 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4155 WARN_ON(fs_info->trans_block_rsv.size > 0);
4156 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4157 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4158 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
6d668dda
JB
4159 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4160 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
fcb80c2a
JB
4161}
4162
a22285a6
YZ
4163void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4164 struct btrfs_root *root)
6a63209f 4165{
a22285a6
YZ
4166 if (!trans->bytes_reserved)
4167 return;
6a63209f 4168
b24e03db 4169 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
a22285a6
YZ
4170 trans->bytes_reserved = 0;
4171}
6a63209f 4172
d68fc57b
YZ
4173int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4174 struct inode *inode)
4175{
4176 struct btrfs_root *root = BTRFS_I(inode)->root;
4177 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4178 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4179
4180 /*
fcb80c2a
JB
4181 * We need to hold space in order to delete our orphan item once we've
4182 * added it, so this takes the reservation so we can release it later
4183 * when we are truly done with the orphan item.
d68fc57b 4184 */
ff5714cc 4185 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
d68fc57b 4186 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
6a63209f
JB
4187}
4188
d68fc57b 4189void btrfs_orphan_release_metadata(struct inode *inode)
97e728d4 4190{
d68fc57b 4191 struct btrfs_root *root = BTRFS_I(inode)->root;
ff5714cc 4192 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
d68fc57b
YZ
4193 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4194}
97e728d4 4195
a22285a6
YZ
4196int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4197 struct btrfs_pending_snapshot *pending)
4198{
4199 struct btrfs_root *root = pending->root;
4200 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4201 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4202 /*
4203 * two for root back/forward refs, two for directory entries
4204 * and one for root of the snapshot.
4205 */
16cdcec7 4206 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
a22285a6
YZ
4207 dst_rsv->space_info = src_rsv->space_info;
4208 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
97e728d4
JB
4209}
4210
7709cde3
JB
4211/**
4212 * drop_outstanding_extent - drop an outstanding extent
4213 * @inode: the inode we're dropping the extent for
4214 *
4215 * This is called when we are freeing up an outstanding extent, either called
4216 * after an error or after an extent is written. This will return the number of
4217 * reserved extents that need to be freed. This must be called with
4218 * BTRFS_I(inode)->lock held.
4219 */
9e0baf60
JB
4220static unsigned drop_outstanding_extent(struct inode *inode)
4221{
7fd2ae21 4222 unsigned drop_inode_space = 0;
9e0baf60
JB
4223 unsigned dropped_extents = 0;
4224
9e0baf60
JB
4225 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4226 BTRFS_I(inode)->outstanding_extents--;
4227
7fd2ae21
JB
4228 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4229 BTRFS_I(inode)->delalloc_meta_reserved) {
4230 drop_inode_space = 1;
4231 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4232 }
4233
9e0baf60
JB
4234 /*
4235 * If we have more or the same amount of outsanding extents than we have
4236 * reserved then we need to leave the reserved extents count alone.
4237 */
4238 if (BTRFS_I(inode)->outstanding_extents >=
4239 BTRFS_I(inode)->reserved_extents)
7fd2ae21 4240 return drop_inode_space;
9e0baf60
JB
4241
4242 dropped_extents = BTRFS_I(inode)->reserved_extents -
4243 BTRFS_I(inode)->outstanding_extents;
4244 BTRFS_I(inode)->reserved_extents -= dropped_extents;
7fd2ae21 4245 return dropped_extents + drop_inode_space;
9e0baf60
JB
4246}
4247
7709cde3
JB
4248/**
4249 * calc_csum_metadata_size - return the amount of metada space that must be
4250 * reserved/free'd for the given bytes.
4251 * @inode: the inode we're manipulating
4252 * @num_bytes: the number of bytes in question
4253 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4254 *
4255 * This adjusts the number of csum_bytes in the inode and then returns the
4256 * correct amount of metadata that must either be reserved or freed. We
4257 * calculate how many checksums we can fit into one leaf and then divide the
4258 * number of bytes that will need to be checksumed by this value to figure out
4259 * how many checksums will be required. If we are adding bytes then the number
4260 * may go up and we will return the number of additional bytes that must be
4261 * reserved. If it is going down we will return the number of bytes that must
4262 * be freed.
4263 *
4264 * This must be called with BTRFS_I(inode)->lock held.
4265 */
4266static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4267 int reserve)
6324fbf3 4268{
7709cde3
JB
4269 struct btrfs_root *root = BTRFS_I(inode)->root;
4270 u64 csum_size;
4271 int num_csums_per_leaf;
4272 int num_csums;
4273 int old_csums;
4274
4275 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4276 BTRFS_I(inode)->csum_bytes == 0)
4277 return 0;
4278
4279 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4280 if (reserve)
4281 BTRFS_I(inode)->csum_bytes += num_bytes;
4282 else
4283 BTRFS_I(inode)->csum_bytes -= num_bytes;
4284 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4285 num_csums_per_leaf = (int)div64_u64(csum_size,
4286 sizeof(struct btrfs_csum_item) +
4287 sizeof(struct btrfs_disk_key));
4288 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4289 num_csums = num_csums + num_csums_per_leaf - 1;
4290 num_csums = num_csums / num_csums_per_leaf;
4291
4292 old_csums = old_csums + num_csums_per_leaf - 1;
4293 old_csums = old_csums / num_csums_per_leaf;
4294
4295 /* No change, no need to reserve more */
4296 if (old_csums == num_csums)
4297 return 0;
4298
4299 if (reserve)
4300 return btrfs_calc_trans_metadata_size(root,
4301 num_csums - old_csums);
4302
4303 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
0ca1f7ce 4304}
c146afad 4305
0ca1f7ce
YZ
4306int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4307{
4308 struct btrfs_root *root = BTRFS_I(inode)->root;
4309 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
9e0baf60 4310 u64 to_reserve = 0;
660d3f6c 4311 u64 csum_bytes;
9e0baf60 4312 unsigned nr_extents = 0;
660d3f6c 4313 int extra_reserve = 0;
c09544e0 4314 int flush = 1;
0ca1f7ce 4315 int ret;
6324fbf3 4316
660d3f6c 4317 /* Need to be holding the i_mutex here if we aren't free space cache */
c09544e0
JB
4318 if (btrfs_is_free_space_inode(root, inode))
4319 flush = 0;
660d3f6c
JB
4320 else
4321 WARN_ON(!mutex_is_locked(&inode->i_mutex));
c09544e0
JB
4322
4323 if (flush && btrfs_transaction_in_commit(root->fs_info))
0ca1f7ce 4324 schedule_timeout(1);
ec44a35c 4325
0ca1f7ce 4326 num_bytes = ALIGN(num_bytes, root->sectorsize);
8bb8ab2e 4327
9e0baf60
JB
4328 spin_lock(&BTRFS_I(inode)->lock);
4329 BTRFS_I(inode)->outstanding_extents++;
4330
4331 if (BTRFS_I(inode)->outstanding_extents >
660d3f6c 4332 BTRFS_I(inode)->reserved_extents)
9e0baf60
JB
4333 nr_extents = BTRFS_I(inode)->outstanding_extents -
4334 BTRFS_I(inode)->reserved_extents;
57a45ced 4335
7fd2ae21
JB
4336 /*
4337 * Add an item to reserve for updating the inode when we complete the
4338 * delalloc io.
4339 */
4340 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4341 nr_extents++;
660d3f6c 4342 extra_reserve = 1;
593060d7 4343 }
7fd2ae21
JB
4344
4345 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
7709cde3 4346 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
660d3f6c 4347 csum_bytes = BTRFS_I(inode)->csum_bytes;
9e0baf60 4348 spin_unlock(&BTRFS_I(inode)->lock);
57a45ced 4349
36ba022a 4350 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
9e0baf60 4351 if (ret) {
7ed49f18 4352 u64 to_free = 0;
9e0baf60 4353 unsigned dropped;
7ed49f18 4354
7709cde3 4355 spin_lock(&BTRFS_I(inode)->lock);
9e0baf60 4356 dropped = drop_outstanding_extent(inode);
9e0baf60 4357 /*
660d3f6c
JB
4358 * If the inodes csum_bytes is the same as the original
4359 * csum_bytes then we know we haven't raced with any free()ers
4360 * so we can just reduce our inodes csum bytes and carry on.
4361 * Otherwise we have to do the normal free thing to account for
4362 * the case that the free side didn't free up its reserve
4363 * because of this outstanding reservation.
9e0baf60 4364 */
660d3f6c
JB
4365 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4366 calc_csum_metadata_size(inode, num_bytes, 0);
4367 else
4368 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4369 spin_unlock(&BTRFS_I(inode)->lock);
4370 if (dropped)
4371 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4372
7ed49f18
JB
4373 if (to_free)
4374 btrfs_block_rsv_release(root, block_rsv, to_free);
0ca1f7ce 4375 return ret;
9e0baf60 4376 }
25179201 4377
660d3f6c
JB
4378 spin_lock(&BTRFS_I(inode)->lock);
4379 if (extra_reserve) {
4380 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4381 nr_extents--;
4382 }
4383 BTRFS_I(inode)->reserved_extents += nr_extents;
4384 spin_unlock(&BTRFS_I(inode)->lock);
4385
0ca1f7ce
YZ
4386 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4387
0ca1f7ce
YZ
4388 return 0;
4389}
4390
7709cde3
JB
4391/**
4392 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4393 * @inode: the inode to release the reservation for
4394 * @num_bytes: the number of bytes we're releasing
4395 *
4396 * This will release the metadata reservation for an inode. This can be called
4397 * once we complete IO for a given set of bytes to release their metadata
4398 * reservations.
4399 */
0ca1f7ce
YZ
4400void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4401{
4402 struct btrfs_root *root = BTRFS_I(inode)->root;
9e0baf60
JB
4403 u64 to_free = 0;
4404 unsigned dropped;
0ca1f7ce
YZ
4405
4406 num_bytes = ALIGN(num_bytes, root->sectorsize);
7709cde3 4407 spin_lock(&BTRFS_I(inode)->lock);
9e0baf60 4408 dropped = drop_outstanding_extent(inode);
97e728d4 4409
7709cde3
JB
4410 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4411 spin_unlock(&BTRFS_I(inode)->lock);
9e0baf60
JB
4412 if (dropped > 0)
4413 to_free += btrfs_calc_trans_metadata_size(root, dropped);
0ca1f7ce
YZ
4414
4415 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4416 to_free);
4417}
4418
7709cde3
JB
4419/**
4420 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4421 * @inode: inode we're writing to
4422 * @num_bytes: the number of bytes we want to allocate
4423 *
4424 * This will do the following things
4425 *
4426 * o reserve space in the data space info for num_bytes
4427 * o reserve space in the metadata space info based on number of outstanding
4428 * extents and how much csums will be needed
4429 * o add to the inodes ->delalloc_bytes
4430 * o add it to the fs_info's delalloc inodes list.
4431 *
4432 * This will return 0 for success and -ENOSPC if there is no space left.
4433 */
0ca1f7ce
YZ
4434int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4435{
4436 int ret;
4437
4438 ret = btrfs_check_data_free_space(inode, num_bytes);
d397712b 4439 if (ret)
0ca1f7ce
YZ
4440 return ret;
4441
4442 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4443 if (ret) {
4444 btrfs_free_reserved_data_space(inode, num_bytes);
4445 return ret;
4446 }
4447
4448 return 0;
4449}
4450
7709cde3
JB
4451/**
4452 * btrfs_delalloc_release_space - release data and metadata space for delalloc
4453 * @inode: inode we're releasing space for
4454 * @num_bytes: the number of bytes we want to free up
4455 *
4456 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
4457 * called in the case that we don't need the metadata AND data reservations
4458 * anymore. So if there is an error or we insert an inline extent.
4459 *
4460 * This function will release the metadata space that was not used and will
4461 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4462 * list if there are no delalloc bytes left.
4463 */
0ca1f7ce
YZ
4464void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4465{
4466 btrfs_delalloc_release_metadata(inode, num_bytes);
4467 btrfs_free_reserved_data_space(inode, num_bytes);
6324fbf3
CM
4468}
4469
9078a3e1
CM
4470static int update_block_group(struct btrfs_trans_handle *trans,
4471 struct btrfs_root *root,
f0486c68 4472 u64 bytenr, u64 num_bytes, int alloc)
9078a3e1 4473{
0af3d00b 4474 struct btrfs_block_group_cache *cache = NULL;
9078a3e1 4475 struct btrfs_fs_info *info = root->fs_info;
db94535d 4476 u64 total = num_bytes;
9078a3e1 4477 u64 old_val;
db94535d 4478 u64 byte_in_group;
0af3d00b 4479 int factor;
3e1ad54f 4480
5d4f98a2
YZ
4481 /* block accounting for super block */
4482 spin_lock(&info->delalloc_lock);
6c41761f 4483 old_val = btrfs_super_bytes_used(info->super_copy);
5d4f98a2
YZ
4484 if (alloc)
4485 old_val += num_bytes;
4486 else
4487 old_val -= num_bytes;
6c41761f 4488 btrfs_set_super_bytes_used(info->super_copy, old_val);
5d4f98a2
YZ
4489 spin_unlock(&info->delalloc_lock);
4490
d397712b 4491 while (total) {
db94535d 4492 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 4493 if (!cache)
9078a3e1 4494 return -1;
b742bb82
YZ
4495 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4496 BTRFS_BLOCK_GROUP_RAID1 |
4497 BTRFS_BLOCK_GROUP_RAID10))
4498 factor = 2;
4499 else
4500 factor = 1;
9d66e233
JB
4501 /*
4502 * If this block group has free space cache written out, we
4503 * need to make sure to load it if we are removing space. This
4504 * is because we need the unpinning stage to actually add the
4505 * space back to the block group, otherwise we will leak space.
4506 */
4507 if (!alloc && cache->cached == BTRFS_CACHE_NO)
b8399dee 4508 cache_block_group(cache, trans, NULL, 1);
0af3d00b 4509
db94535d
CM
4510 byte_in_group = bytenr - cache->key.objectid;
4511 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 4512
25179201 4513 spin_lock(&cache->space_info->lock);
c286ac48 4514 spin_lock(&cache->lock);
0af3d00b 4515
73bc1876 4516 if (btrfs_test_opt(root, SPACE_CACHE) &&
0af3d00b
JB
4517 cache->disk_cache_state < BTRFS_DC_CLEAR)
4518 cache->disk_cache_state = BTRFS_DC_CLEAR;
4519
0f9dd46c 4520 cache->dirty = 1;
9078a3e1 4521 old_val = btrfs_block_group_used(&cache->item);
db94535d 4522 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 4523 if (alloc) {
db94535d 4524 old_val += num_bytes;
11833d66
YZ
4525 btrfs_set_block_group_used(&cache->item, old_val);
4526 cache->reserved -= num_bytes;
11833d66 4527 cache->space_info->bytes_reserved -= num_bytes;
b742bb82
YZ
4528 cache->space_info->bytes_used += num_bytes;
4529 cache->space_info->disk_used += num_bytes * factor;
c286ac48 4530 spin_unlock(&cache->lock);
25179201 4531 spin_unlock(&cache->space_info->lock);
cd1bc465 4532 } else {
db94535d 4533 old_val -= num_bytes;
c286ac48 4534 btrfs_set_block_group_used(&cache->item, old_val);
f0486c68
YZ
4535 cache->pinned += num_bytes;
4536 cache->space_info->bytes_pinned += num_bytes;
6324fbf3 4537 cache->space_info->bytes_used -= num_bytes;
b742bb82 4538 cache->space_info->disk_used -= num_bytes * factor;
c286ac48 4539 spin_unlock(&cache->lock);
25179201 4540 spin_unlock(&cache->space_info->lock);
1f3c79a2 4541
f0486c68
YZ
4542 set_extent_dirty(info->pinned_extents,
4543 bytenr, bytenr + num_bytes - 1,
4544 GFP_NOFS | __GFP_NOFAIL);
cd1bc465 4545 }
fa9c0d79 4546 btrfs_put_block_group(cache);
db94535d
CM
4547 total -= num_bytes;
4548 bytenr += num_bytes;
9078a3e1
CM
4549 }
4550 return 0;
4551}
6324fbf3 4552
a061fc8d
CM
4553static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4554{
0f9dd46c 4555 struct btrfs_block_group_cache *cache;
d2fb3437 4556 u64 bytenr;
0f9dd46c
JB
4557
4558 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4559 if (!cache)
a061fc8d 4560 return 0;
0f9dd46c 4561
d2fb3437 4562 bytenr = cache->key.objectid;
fa9c0d79 4563 btrfs_put_block_group(cache);
d2fb3437
YZ
4564
4565 return bytenr;
a061fc8d
CM
4566}
4567
f0486c68
YZ
4568static int pin_down_extent(struct btrfs_root *root,
4569 struct btrfs_block_group_cache *cache,
4570 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 4571{
11833d66
YZ
4572 spin_lock(&cache->space_info->lock);
4573 spin_lock(&cache->lock);
4574 cache->pinned += num_bytes;
4575 cache->space_info->bytes_pinned += num_bytes;
4576 if (reserved) {
4577 cache->reserved -= num_bytes;
4578 cache->space_info->bytes_reserved -= num_bytes;
4579 }
4580 spin_unlock(&cache->lock);
4581 spin_unlock(&cache->space_info->lock);
68b38550 4582
f0486c68
YZ
4583 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4584 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4585 return 0;
4586}
68b38550 4587
f0486c68
YZ
4588/*
4589 * this function must be called within transaction
4590 */
4591int btrfs_pin_extent(struct btrfs_root *root,
4592 u64 bytenr, u64 num_bytes, int reserved)
4593{
4594 struct btrfs_block_group_cache *cache;
68b38550 4595
f0486c68
YZ
4596 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4597 BUG_ON(!cache);
4598
4599 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4600
4601 btrfs_put_block_group(cache);
11833d66
YZ
4602 return 0;
4603}
4604
f0486c68 4605/*
e688b725
CM
4606 * this function must be called within transaction
4607 */
4608int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4609 struct btrfs_root *root,
4610 u64 bytenr, u64 num_bytes)
4611{
4612 struct btrfs_block_group_cache *cache;
4613
4614 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4615 BUG_ON(!cache);
4616
4617 /*
4618 * pull in the free space cache (if any) so that our pin
4619 * removes the free space from the cache. We have load_only set
4620 * to one because the slow code to read in the free extents does check
4621 * the pinned extents.
4622 */
4623 cache_block_group(cache, trans, root, 1);
4624
4625 pin_down_extent(root, cache, bytenr, num_bytes, 0);
4626
4627 /* remove us from the free space cache (if we're there at all) */
4628 btrfs_remove_free_space(cache, bytenr, num_bytes);
4629 btrfs_put_block_group(cache);
4630 return 0;
4631}
4632
fb25e914
JB
4633/**
4634 * btrfs_update_reserved_bytes - update the block_group and space info counters
4635 * @cache: The cache we are manipulating
4636 * @num_bytes: The number of bytes in question
4637 * @reserve: One of the reservation enums
4638 *
4639 * This is called by the allocator when it reserves space, or by somebody who is
4640 * freeing space that was never actually used on disk. For example if you
4641 * reserve some space for a new leaf in transaction A and before transaction A
4642 * commits you free that leaf, you call this with reserve set to 0 in order to
4643 * clear the reservation.
4644 *
4645 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4646 * ENOSPC accounting. For data we handle the reservation through clearing the
4647 * delalloc bits in the io_tree. We have to do this since we could end up
4648 * allocating less disk space for the amount of data we have reserved in the
4649 * case of compression.
4650 *
4651 * If this is a reservation and the block group has become read only we cannot
4652 * make the reservation and return -EAGAIN, otherwise this function always
4653 * succeeds.
f0486c68 4654 */
fb25e914
JB
4655static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4656 u64 num_bytes, int reserve)
11833d66 4657{
fb25e914 4658 struct btrfs_space_info *space_info = cache->space_info;
f0486c68 4659 int ret = 0;
fb25e914
JB
4660 spin_lock(&space_info->lock);
4661 spin_lock(&cache->lock);
4662 if (reserve != RESERVE_FREE) {
f0486c68
YZ
4663 if (cache->ro) {
4664 ret = -EAGAIN;
4665 } else {
fb25e914
JB
4666 cache->reserved += num_bytes;
4667 space_info->bytes_reserved += num_bytes;
4668 if (reserve == RESERVE_ALLOC) {
4669 BUG_ON(space_info->bytes_may_use < num_bytes);
4670 space_info->bytes_may_use -= num_bytes;
4671 }
f0486c68 4672 }
fb25e914
JB
4673 } else {
4674 if (cache->ro)
4675 space_info->bytes_readonly += num_bytes;
4676 cache->reserved -= num_bytes;
4677 space_info->bytes_reserved -= num_bytes;
4678 space_info->reservation_progress++;
324ae4df 4679 }
fb25e914
JB
4680 spin_unlock(&cache->lock);
4681 spin_unlock(&space_info->lock);
f0486c68 4682 return ret;
324ae4df 4683}
9078a3e1 4684
11833d66
YZ
4685int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4686 struct btrfs_root *root)
e8569813 4687{
e8569813 4688 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66
YZ
4689 struct btrfs_caching_control *next;
4690 struct btrfs_caching_control *caching_ctl;
4691 struct btrfs_block_group_cache *cache;
e8569813 4692
11833d66 4693 down_write(&fs_info->extent_commit_sem);
25179201 4694
11833d66
YZ
4695 list_for_each_entry_safe(caching_ctl, next,
4696 &fs_info->caching_block_groups, list) {
4697 cache = caching_ctl->block_group;
4698 if (block_group_cache_done(cache)) {
4699 cache->last_byte_to_unpin = (u64)-1;
4700 list_del_init(&caching_ctl->list);
4701 put_caching_control(caching_ctl);
e8569813 4702 } else {
11833d66 4703 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 4704 }
e8569813 4705 }
11833d66
YZ
4706
4707 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4708 fs_info->pinned_extents = &fs_info->freed_extents[1];
4709 else
4710 fs_info->pinned_extents = &fs_info->freed_extents[0];
4711
4712 up_write(&fs_info->extent_commit_sem);
8929ecfa
YZ
4713
4714 update_global_block_rsv(fs_info);
e8569813
ZY
4715 return 0;
4716}
4717
11833d66 4718static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 4719{
11833d66
YZ
4720 struct btrfs_fs_info *fs_info = root->fs_info;
4721 struct btrfs_block_group_cache *cache = NULL;
4722 u64 len;
ccd467d6 4723
11833d66
YZ
4724 while (start <= end) {
4725 if (!cache ||
4726 start >= cache->key.objectid + cache->key.offset) {
4727 if (cache)
4728 btrfs_put_block_group(cache);
4729 cache = btrfs_lookup_block_group(fs_info, start);
4730 BUG_ON(!cache);
4731 }
4732
4733 len = cache->key.objectid + cache->key.offset - start;
4734 len = min(len, end + 1 - start);
4735
4736 if (start < cache->last_byte_to_unpin) {
4737 len = min(len, cache->last_byte_to_unpin - start);
4738 btrfs_add_free_space(cache, start, len);
4739 }
4740
f0486c68
YZ
4741 start += len;
4742
11833d66
YZ
4743 spin_lock(&cache->space_info->lock);
4744 spin_lock(&cache->lock);
4745 cache->pinned -= len;
4746 cache->space_info->bytes_pinned -= len;
37be25bc 4747 if (cache->ro)
f0486c68 4748 cache->space_info->bytes_readonly += len;
11833d66
YZ
4749 spin_unlock(&cache->lock);
4750 spin_unlock(&cache->space_info->lock);
ccd467d6 4751 }
11833d66
YZ
4752
4753 if (cache)
4754 btrfs_put_block_group(cache);
ccd467d6
CM
4755 return 0;
4756}
4757
4758int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 4759 struct btrfs_root *root)
a28ec197 4760{
11833d66
YZ
4761 struct btrfs_fs_info *fs_info = root->fs_info;
4762 struct extent_io_tree *unpin;
1a5bc167
CM
4763 u64 start;
4764 u64 end;
a28ec197 4765 int ret;
a28ec197 4766
11833d66
YZ
4767 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4768 unpin = &fs_info->freed_extents[1];
4769 else
4770 unpin = &fs_info->freed_extents[0];
4771
d397712b 4772 while (1) {
1a5bc167
CM
4773 ret = find_first_extent_bit(unpin, 0, &start, &end,
4774 EXTENT_DIRTY);
4775 if (ret)
a28ec197 4776 break;
1f3c79a2 4777
5378e607
LD
4778 if (btrfs_test_opt(root, DISCARD))
4779 ret = btrfs_discard_extent(root, start,
4780 end + 1 - start, NULL);
1f3c79a2 4781
1a5bc167 4782 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 4783 unpin_extent_range(root, start, end);
b9473439 4784 cond_resched();
a28ec197 4785 }
817d52f8 4786
e20d96d6
CM
4787 return 0;
4788}
4789
5d4f98a2
YZ
4790static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4791 struct btrfs_root *root,
4792 u64 bytenr, u64 num_bytes, u64 parent,
4793 u64 root_objectid, u64 owner_objectid,
4794 u64 owner_offset, int refs_to_drop,
4795 struct btrfs_delayed_extent_op *extent_op)
a28ec197 4796{
e2fa7227 4797 struct btrfs_key key;
5d4f98a2 4798 struct btrfs_path *path;
1261ec42
CM
4799 struct btrfs_fs_info *info = root->fs_info;
4800 struct btrfs_root *extent_root = info->extent_root;
5f39d397 4801 struct extent_buffer *leaf;
5d4f98a2
YZ
4802 struct btrfs_extent_item *ei;
4803 struct btrfs_extent_inline_ref *iref;
a28ec197 4804 int ret;
5d4f98a2 4805 int is_data;
952fccac
CM
4806 int extent_slot = 0;
4807 int found_extent = 0;
4808 int num_to_del = 1;
5d4f98a2
YZ
4809 u32 item_size;
4810 u64 refs;
037e6390 4811
5caf2a00 4812 path = btrfs_alloc_path();
54aa1f4d
CM
4813 if (!path)
4814 return -ENOMEM;
5f26f772 4815
3c12ac72 4816 path->reada = 1;
b9473439 4817 path->leave_spinning = 1;
5d4f98a2
YZ
4818
4819 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4820 BUG_ON(!is_data && refs_to_drop != 1);
4821
4822 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4823 bytenr, num_bytes, parent,
4824 root_objectid, owner_objectid,
4825 owner_offset);
7bb86316 4826 if (ret == 0) {
952fccac 4827 extent_slot = path->slots[0];
5d4f98a2
YZ
4828 while (extent_slot >= 0) {
4829 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 4830 extent_slot);
5d4f98a2 4831 if (key.objectid != bytenr)
952fccac 4832 break;
5d4f98a2
YZ
4833 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4834 key.offset == num_bytes) {
952fccac
CM
4835 found_extent = 1;
4836 break;
4837 }
4838 if (path->slots[0] - extent_slot > 5)
4839 break;
5d4f98a2 4840 extent_slot--;
952fccac 4841 }
5d4f98a2
YZ
4842#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4843 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4844 if (found_extent && item_size < sizeof(*ei))
4845 found_extent = 0;
4846#endif
31840ae1 4847 if (!found_extent) {
5d4f98a2 4848 BUG_ON(iref);
56bec294 4849 ret = remove_extent_backref(trans, extent_root, path,
5d4f98a2
YZ
4850 NULL, refs_to_drop,
4851 is_data);
31840ae1 4852 BUG_ON(ret);
b3b4aa74 4853 btrfs_release_path(path);
b9473439 4854 path->leave_spinning = 1;
5d4f98a2
YZ
4855
4856 key.objectid = bytenr;
4857 key.type = BTRFS_EXTENT_ITEM_KEY;
4858 key.offset = num_bytes;
4859
31840ae1
ZY
4860 ret = btrfs_search_slot(trans, extent_root,
4861 &key, path, -1, 1);
f3465ca4
JB
4862 if (ret) {
4863 printk(KERN_ERR "umm, got %d back from search"
d397712b
CM
4864 ", was looking for %llu\n", ret,
4865 (unsigned long long)bytenr);
b783e62d
JB
4866 if (ret > 0)
4867 btrfs_print_leaf(extent_root,
4868 path->nodes[0]);
f3465ca4 4869 }
31840ae1
ZY
4870 BUG_ON(ret);
4871 extent_slot = path->slots[0];
4872 }
7bb86316
CM
4873 } else {
4874 btrfs_print_leaf(extent_root, path->nodes[0]);
4875 WARN_ON(1);
d397712b 4876 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 4877 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 4878 (unsigned long long)bytenr,
56bec294 4879 (unsigned long long)parent,
d397712b 4880 (unsigned long long)root_objectid,
5d4f98a2
YZ
4881 (unsigned long long)owner_objectid,
4882 (unsigned long long)owner_offset);
7bb86316 4883 }
5f39d397
CM
4884
4885 leaf = path->nodes[0];
5d4f98a2
YZ
4886 item_size = btrfs_item_size_nr(leaf, extent_slot);
4887#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4888 if (item_size < sizeof(*ei)) {
4889 BUG_ON(found_extent || extent_slot != path->slots[0]);
4890 ret = convert_extent_item_v0(trans, extent_root, path,
4891 owner_objectid, 0);
4892 BUG_ON(ret < 0);
4893
b3b4aa74 4894 btrfs_release_path(path);
5d4f98a2
YZ
4895 path->leave_spinning = 1;
4896
4897 key.objectid = bytenr;
4898 key.type = BTRFS_EXTENT_ITEM_KEY;
4899 key.offset = num_bytes;
4900
4901 ret = btrfs_search_slot(trans, extent_root, &key, path,
4902 -1, 1);
4903 if (ret) {
4904 printk(KERN_ERR "umm, got %d back from search"
4905 ", was looking for %llu\n", ret,
4906 (unsigned long long)bytenr);
4907 btrfs_print_leaf(extent_root, path->nodes[0]);
4908 }
4909 BUG_ON(ret);
4910 extent_slot = path->slots[0];
4911 leaf = path->nodes[0];
4912 item_size = btrfs_item_size_nr(leaf, extent_slot);
4913 }
4914#endif
4915 BUG_ON(item_size < sizeof(*ei));
952fccac 4916 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 4917 struct btrfs_extent_item);
5d4f98a2
YZ
4918 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4919 struct btrfs_tree_block_info *bi;
4920 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4921 bi = (struct btrfs_tree_block_info *)(ei + 1);
4922 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4923 }
56bec294 4924
5d4f98a2 4925 refs = btrfs_extent_refs(leaf, ei);
56bec294
CM
4926 BUG_ON(refs < refs_to_drop);
4927 refs -= refs_to_drop;
5f39d397 4928
5d4f98a2
YZ
4929 if (refs > 0) {
4930 if (extent_op)
4931 __run_delayed_extent_op(extent_op, leaf, ei);
4932 /*
4933 * In the case of inline back ref, reference count will
4934 * be updated by remove_extent_backref
952fccac 4935 */
5d4f98a2
YZ
4936 if (iref) {
4937 BUG_ON(!found_extent);
4938 } else {
4939 btrfs_set_extent_refs(leaf, ei, refs);
4940 btrfs_mark_buffer_dirty(leaf);
4941 }
4942 if (found_extent) {
4943 ret = remove_extent_backref(trans, extent_root, path,
4944 iref, refs_to_drop,
4945 is_data);
952fccac
CM
4946 BUG_ON(ret);
4947 }
5d4f98a2 4948 } else {
5d4f98a2
YZ
4949 if (found_extent) {
4950 BUG_ON(is_data && refs_to_drop !=
4951 extent_data_ref_count(root, path, iref));
4952 if (iref) {
4953 BUG_ON(path->slots[0] != extent_slot);
4954 } else {
4955 BUG_ON(path->slots[0] != extent_slot + 1);
4956 path->slots[0] = extent_slot;
4957 num_to_del = 2;
4958 }
78fae27e 4959 }
b9473439 4960
952fccac
CM
4961 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4962 num_to_del);
31840ae1 4963 BUG_ON(ret);
b3b4aa74 4964 btrfs_release_path(path);
21af804c 4965
5d4f98a2 4966 if (is_data) {
459931ec
CM
4967 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4968 BUG_ON(ret);
d57e62b8
CM
4969 } else {
4970 invalidate_mapping_pages(info->btree_inode->i_mapping,
4971 bytenr >> PAGE_CACHE_SHIFT,
4972 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
459931ec
CM
4973 }
4974
f0486c68 4975 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
dcbdd4dc 4976 BUG_ON(ret);
a28ec197 4977 }
5caf2a00 4978 btrfs_free_path(path);
a28ec197
CM
4979 return ret;
4980}
4981
1887be66 4982/*
f0486c68 4983 * when we free an block, it is possible (and likely) that we free the last
1887be66
CM
4984 * delayed ref for that extent as well. This searches the delayed ref tree for
4985 * a given extent, and if there are no other delayed refs to be processed, it
4986 * removes it from the tree.
4987 */
4988static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4989 struct btrfs_root *root, u64 bytenr)
4990{
4991 struct btrfs_delayed_ref_head *head;
4992 struct btrfs_delayed_ref_root *delayed_refs;
4993 struct btrfs_delayed_ref_node *ref;
4994 struct rb_node *node;
f0486c68 4995 int ret = 0;
1887be66
CM
4996
4997 delayed_refs = &trans->transaction->delayed_refs;
4998 spin_lock(&delayed_refs->lock);
4999 head = btrfs_find_delayed_ref_head(trans, bytenr);
5000 if (!head)
5001 goto out;
5002
5003 node = rb_prev(&head->node.rb_node);
5004 if (!node)
5005 goto out;
5006
5007 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5008
5009 /* there are still entries for this ref, we can't drop it */
5010 if (ref->bytenr == bytenr)
5011 goto out;
5012
5d4f98a2
YZ
5013 if (head->extent_op) {
5014 if (!head->must_insert_reserved)
5015 goto out;
5016 kfree(head->extent_op);
5017 head->extent_op = NULL;
5018 }
5019
1887be66
CM
5020 /*
5021 * waiting for the lock here would deadlock. If someone else has it
5022 * locked they are already in the process of dropping it anyway
5023 */
5024 if (!mutex_trylock(&head->mutex))
5025 goto out;
5026
5027 /*
5028 * at this point we have a head with no other entries. Go
5029 * ahead and process it.
5030 */
5031 head->node.in_tree = 0;
5032 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 5033
1887be66 5034 delayed_refs->num_entries--;
a168650c
JS
5035 if (waitqueue_active(&delayed_refs->seq_wait))
5036 wake_up(&delayed_refs->seq_wait);
1887be66
CM
5037
5038 /*
5039 * we don't take a ref on the node because we're removing it from the
5040 * tree, so we just steal the ref the tree was holding.
5041 */
c3e69d58
CM
5042 delayed_refs->num_heads--;
5043 if (list_empty(&head->cluster))
5044 delayed_refs->num_heads_ready--;
5045
5046 list_del_init(&head->cluster);
1887be66
CM
5047 spin_unlock(&delayed_refs->lock);
5048
f0486c68
YZ
5049 BUG_ON(head->extent_op);
5050 if (head->must_insert_reserved)
5051 ret = 1;
5052
5053 mutex_unlock(&head->mutex);
1887be66 5054 btrfs_put_delayed_ref(&head->node);
f0486c68 5055 return ret;
1887be66
CM
5056out:
5057 spin_unlock(&delayed_refs->lock);
5058 return 0;
5059}
5060
f0486c68
YZ
5061void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5062 struct btrfs_root *root,
5063 struct extent_buffer *buf,
66d7e7f0 5064 u64 parent, int last_ref, int for_cow)
f0486c68 5065{
f0486c68
YZ
5066 struct btrfs_block_group_cache *cache = NULL;
5067 int ret;
5068
5069 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
66d7e7f0
AJ
5070 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5071 buf->start, buf->len,
5072 parent, root->root_key.objectid,
5073 btrfs_header_level(buf),
5074 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
f0486c68
YZ
5075 BUG_ON(ret);
5076 }
5077
5078 if (!last_ref)
5079 return;
5080
f0486c68 5081 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
f0486c68
YZ
5082
5083 if (btrfs_header_generation(buf) == trans->transid) {
5084 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5085 ret = check_ref_cleanup(trans, root, buf->start);
5086 if (!ret)
37be25bc 5087 goto out;
f0486c68
YZ
5088 }
5089
5090 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5091 pin_down_extent(root, cache, buf->start, buf->len, 1);
37be25bc 5092 goto out;
f0486c68
YZ
5093 }
5094
5095 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5096
5097 btrfs_add_free_space(cache, buf->start, buf->len);
fb25e914 5098 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
f0486c68
YZ
5099 }
5100out:
a826d6dc
JB
5101 /*
5102 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5103 * anymore.
5104 */
5105 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
f0486c68
YZ
5106 btrfs_put_block_group(cache);
5107}
5108
66d7e7f0
AJ
5109int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5110 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5111 u64 owner, u64 offset, int for_cow)
925baedd
CM
5112{
5113 int ret;
66d7e7f0 5114 struct btrfs_fs_info *fs_info = root->fs_info;
925baedd 5115
56bec294
CM
5116 /*
5117 * tree log blocks never actually go into the extent allocation
5118 * tree, just update pinning info and exit early.
56bec294 5119 */
5d4f98a2
YZ
5120 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5121 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
b9473439 5122 /* unlocks the pinned mutex */
11833d66 5123 btrfs_pin_extent(root, bytenr, num_bytes, 1);
56bec294 5124 ret = 0;
5d4f98a2 5125 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
66d7e7f0
AJ
5126 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5127 num_bytes,
5d4f98a2 5128 parent, root_objectid, (int)owner,
66d7e7f0 5129 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
1887be66 5130 BUG_ON(ret);
5d4f98a2 5131 } else {
66d7e7f0
AJ
5132 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5133 num_bytes,
5134 parent, root_objectid, owner,
5135 offset, BTRFS_DROP_DELAYED_REF,
5136 NULL, for_cow);
5d4f98a2 5137 BUG_ON(ret);
56bec294 5138 }
925baedd
CM
5139 return ret;
5140}
5141
87ee04eb
CM
5142static u64 stripe_align(struct btrfs_root *root, u64 val)
5143{
5144 u64 mask = ((u64)root->stripesize - 1);
5145 u64 ret = (val + mask) & ~mask;
5146 return ret;
5147}
5148
817d52f8
JB
5149/*
5150 * when we wait for progress in the block group caching, its because
5151 * our allocation attempt failed at least once. So, we must sleep
5152 * and let some progress happen before we try again.
5153 *
5154 * This function will sleep at least once waiting for new free space to
5155 * show up, and then it will check the block group free space numbers
5156 * for our min num_bytes. Another option is to have it go ahead
5157 * and look in the rbtree for a free extent of a given size, but this
5158 * is a good start.
5159 */
5160static noinline int
5161wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5162 u64 num_bytes)
5163{
11833d66 5164 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
5165 DEFINE_WAIT(wait);
5166
11833d66
YZ
5167 caching_ctl = get_caching_control(cache);
5168 if (!caching_ctl)
817d52f8 5169 return 0;
817d52f8 5170
11833d66 5171 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
34d52cb6 5172 (cache->free_space_ctl->free_space >= num_bytes));
11833d66
YZ
5173
5174 put_caching_control(caching_ctl);
5175 return 0;
5176}
5177
5178static noinline int
5179wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5180{
5181 struct btrfs_caching_control *caching_ctl;
5182 DEFINE_WAIT(wait);
5183
5184 caching_ctl = get_caching_control(cache);
5185 if (!caching_ctl)
5186 return 0;
5187
5188 wait_event(caching_ctl->wait, block_group_cache_done(cache));
5189
5190 put_caching_control(caching_ctl);
817d52f8
JB
5191 return 0;
5192}
5193
b742bb82
YZ
5194static int get_block_group_index(struct btrfs_block_group_cache *cache)
5195{
5196 int index;
5197 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
5198 index = 0;
5199 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
5200 index = 1;
5201 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
5202 index = 2;
5203 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
5204 index = 3;
5205 else
5206 index = 4;
5207 return index;
5208}
5209
817d52f8 5210enum btrfs_loop_type {
ccf0e725 5211 LOOP_FIND_IDEAL = 0,
817d52f8
JB
5212 LOOP_CACHING_NOWAIT = 1,
5213 LOOP_CACHING_WAIT = 2,
5214 LOOP_ALLOC_CHUNK = 3,
5215 LOOP_NO_EMPTY_SIZE = 4,
5216};
5217
fec577fb
CM
5218/*
5219 * walks the btree of allocated extents and find a hole of a given size.
5220 * The key ins is changed to record the hole:
5221 * ins->objectid == block start
62e2749e 5222 * ins->flags = BTRFS_EXTENT_ITEM_KEY
fec577fb
CM
5223 * ins->offset == number of blocks
5224 * Any available blocks before search_start are skipped.
5225 */
d397712b 5226static noinline int find_free_extent(struct btrfs_trans_handle *trans,
98ed5174
CM
5227 struct btrfs_root *orig_root,
5228 u64 num_bytes, u64 empty_size,
5229 u64 search_start, u64 search_end,
5230 u64 hint_byte, struct btrfs_key *ins,
e0f54067 5231 u64 data)
fec577fb 5232{
80eb234a 5233 int ret = 0;
d397712b 5234 struct btrfs_root *root = orig_root->fs_info->extent_root;
fa9c0d79 5235 struct btrfs_free_cluster *last_ptr = NULL;
80eb234a 5236 struct btrfs_block_group_cache *block_group = NULL;
274bd4fb 5237 struct btrfs_block_group_cache *used_block_group;
239b14b3 5238 int empty_cluster = 2 * 1024 * 1024;
0ef3e66b 5239 int allowed_chunk_alloc = 0;
ccf0e725 5240 int done_chunk_alloc = 0;
80eb234a 5241 struct btrfs_space_info *space_info;
fa9c0d79 5242 int loop = 0;
f0486c68 5243 int index = 0;
fb25e914
JB
5244 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5245 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
817d52f8 5246 bool found_uncached_bg = false;
0a24325e 5247 bool failed_cluster_refill = false;
1cdda9b8 5248 bool failed_alloc = false;
67377734 5249 bool use_cluster = true;
60d2adbb 5250 bool have_caching_bg = false;
ccf0e725
JB
5251 u64 ideal_cache_percent = 0;
5252 u64 ideal_cache_offset = 0;
fec577fb 5253
db94535d 5254 WARN_ON(num_bytes < root->sectorsize);
b1a4d965 5255 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
80eb234a
JB
5256 ins->objectid = 0;
5257 ins->offset = 0;
b1a4d965 5258
3f7de037
JB
5259 trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5260
2552d17e 5261 space_info = __find_space_info(root->fs_info, data);
1b1d1f66 5262 if (!space_info) {
e0f54067 5263 printk(KERN_ERR "No space info for %llu\n", data);
1b1d1f66
JB
5264 return -ENOSPC;
5265 }
2552d17e 5266
67377734
JB
5267 /*
5268 * If the space info is for both data and metadata it means we have a
5269 * small filesystem and we can't use the clustering stuff.
5270 */
5271 if (btrfs_mixed_space_info(space_info))
5272 use_cluster = false;
5273
0ef3e66b
CM
5274 if (orig_root->ref_cows || empty_size)
5275 allowed_chunk_alloc = 1;
5276
67377734 5277 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
fa9c0d79 5278 last_ptr = &root->fs_info->meta_alloc_cluster;
536ac8ae
CM
5279 if (!btrfs_test_opt(root, SSD))
5280 empty_cluster = 64 * 1024;
239b14b3
CM
5281 }
5282
67377734
JB
5283 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5284 btrfs_test_opt(root, SSD)) {
fa9c0d79
CM
5285 last_ptr = &root->fs_info->data_alloc_cluster;
5286 }
0f9dd46c 5287
239b14b3 5288 if (last_ptr) {
fa9c0d79
CM
5289 spin_lock(&last_ptr->lock);
5290 if (last_ptr->block_group)
5291 hint_byte = last_ptr->window_start;
5292 spin_unlock(&last_ptr->lock);
239b14b3 5293 }
fa9c0d79 5294
a061fc8d 5295 search_start = max(search_start, first_logical_byte(root, 0));
239b14b3 5296 search_start = max(search_start, hint_byte);
0b86a832 5297
817d52f8 5298 if (!last_ptr)
fa9c0d79 5299 empty_cluster = 0;
fa9c0d79 5300
2552d17e 5301 if (search_start == hint_byte) {
ccf0e725 5302ideal_cache:
2552d17e
JB
5303 block_group = btrfs_lookup_block_group(root->fs_info,
5304 search_start);
274bd4fb 5305 used_block_group = block_group;
817d52f8
JB
5306 /*
5307 * we don't want to use the block group if it doesn't match our
5308 * allocation bits, or if its not cached.
ccf0e725
JB
5309 *
5310 * However if we are re-searching with an ideal block group
5311 * picked out then we don't care that the block group is cached.
817d52f8
JB
5312 */
5313 if (block_group && block_group_bits(block_group, data) &&
ccf0e725
JB
5314 (block_group->cached != BTRFS_CACHE_NO ||
5315 search_start == ideal_cache_offset)) {
2552d17e 5316 down_read(&space_info->groups_sem);
44fb5511
CM
5317 if (list_empty(&block_group->list) ||
5318 block_group->ro) {
5319 /*
5320 * someone is removing this block group,
5321 * we can't jump into the have_block_group
5322 * target because our list pointers are not
5323 * valid
5324 */
5325 btrfs_put_block_group(block_group);
5326 up_read(&space_info->groups_sem);
ccf0e725 5327 } else {
b742bb82 5328 index = get_block_group_index(block_group);
44fb5511 5329 goto have_block_group;
ccf0e725 5330 }
2552d17e 5331 } else if (block_group) {
fa9c0d79 5332 btrfs_put_block_group(block_group);
2552d17e 5333 }
42e70e7a 5334 }
2552d17e 5335search:
60d2adbb 5336 have_caching_bg = false;
80eb234a 5337 down_read(&space_info->groups_sem);
b742bb82
YZ
5338 list_for_each_entry(block_group, &space_info->block_groups[index],
5339 list) {
6226cb0a 5340 u64 offset;
817d52f8 5341 int cached;
8a1413a2 5342
274bd4fb 5343 used_block_group = block_group;
11dfe35a 5344 btrfs_get_block_group(block_group);
2552d17e 5345 search_start = block_group->key.objectid;
42e70e7a 5346
83a50de9
CM
5347 /*
5348 * this can happen if we end up cycling through all the
5349 * raid types, but we want to make sure we only allocate
5350 * for the proper type.
5351 */
5352 if (!block_group_bits(block_group, data)) {
5353 u64 extra = BTRFS_BLOCK_GROUP_DUP |
5354 BTRFS_BLOCK_GROUP_RAID1 |
5355 BTRFS_BLOCK_GROUP_RAID10;
5356
5357 /*
5358 * if they asked for extra copies and this block group
5359 * doesn't provide them, bail. This does allow us to
5360 * fill raid0 from raid1.
5361 */
5362 if ((data & extra) && !(block_group->flags & extra))
5363 goto loop;
5364 }
5365
2552d17e 5366have_block_group:
291c7d2f
JB
5367 cached = block_group_cache_done(block_group);
5368 if (unlikely(!cached)) {
ccf0e725
JB
5369 u64 free_percent;
5370
291c7d2f 5371 found_uncached_bg = true;
b8399dee
JB
5372 ret = cache_block_group(block_group, trans,
5373 orig_root, 1);
9d66e233 5374 if (block_group->cached == BTRFS_CACHE_FINISHED)
291c7d2f 5375 goto alloc;
9d66e233 5376
ccf0e725
JB
5377 free_percent = btrfs_block_group_used(&block_group->item);
5378 free_percent *= 100;
5379 free_percent = div64_u64(free_percent,
5380 block_group->key.offset);
5381 free_percent = 100 - free_percent;
5382 if (free_percent > ideal_cache_percent &&
5383 likely(!block_group->ro)) {
5384 ideal_cache_offset = block_group->key.objectid;
5385 ideal_cache_percent = free_percent;
5386 }
5387
817d52f8 5388 /*
bab39bf9
JB
5389 * The caching workers are limited to 2 threads, so we
5390 * can queue as much work as we care to.
817d52f8 5391 */
bab39bf9 5392 if (loop > LOOP_FIND_IDEAL) {
b8399dee
JB
5393 ret = cache_block_group(block_group, trans,
5394 orig_root, 0);
817d52f8 5395 BUG_ON(ret);
2552d17e 5396 }
817d52f8 5397
ccf0e725
JB
5398 /*
5399 * If loop is set for cached only, try the next block
5400 * group.
5401 */
5402 if (loop == LOOP_FIND_IDEAL)
817d52f8
JB
5403 goto loop;
5404 }
5405
291c7d2f 5406alloc:
ea6a478e 5407 if (unlikely(block_group->ro))
2552d17e 5408 goto loop;
0f9dd46c 5409
0a24325e 5410 /*
062c05c4
AO
5411 * Ok we want to try and use the cluster allocator, so
5412 * lets look there
0a24325e 5413 */
062c05c4 5414 if (last_ptr) {
fa9c0d79
CM
5415 /*
5416 * the refill lock keeps out other
5417 * people trying to start a new cluster
5418 */
5419 spin_lock(&last_ptr->refill_lock);
274bd4fb
AO
5420 used_block_group = last_ptr->block_group;
5421 if (used_block_group != block_group &&
5422 (!used_block_group ||
5423 used_block_group->ro ||
5424 !block_group_bits(used_block_group, data))) {
5425 used_block_group = block_group;
44fb5511 5426 goto refill_cluster;
274bd4fb
AO
5427 }
5428
5429 if (used_block_group != block_group)
5430 btrfs_get_block_group(used_block_group);
44fb5511 5431
274bd4fb
AO
5432 offset = btrfs_alloc_from_cluster(used_block_group,
5433 last_ptr, num_bytes, used_block_group->key.objectid);
fa9c0d79
CM
5434 if (offset) {
5435 /* we have a block, we're done */
5436 spin_unlock(&last_ptr->refill_lock);
3f7de037
JB
5437 trace_btrfs_reserve_extent_cluster(root,
5438 block_group, search_start, num_bytes);
fa9c0d79
CM
5439 goto checks;
5440 }
5441
274bd4fb
AO
5442 WARN_ON(last_ptr->block_group != used_block_group);
5443 if (used_block_group != block_group) {
5444 btrfs_put_block_group(used_block_group);
5445 used_block_group = block_group;
fa9c0d79 5446 }
44fb5511 5447refill_cluster:
274bd4fb 5448 BUG_ON(used_block_group != block_group);
062c05c4
AO
5449 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5450 * set up a new clusters, so lets just skip it
5451 * and let the allocator find whatever block
5452 * it can find. If we reach this point, we
5453 * will have tried the cluster allocator
5454 * plenty of times and not have found
5455 * anything, so we are likely way too
5456 * fragmented for the clustering stuff to find
a5f6f719
AO
5457 * anything.
5458 *
5459 * However, if the cluster is taken from the
5460 * current block group, release the cluster
5461 * first, so that we stand a better chance of
5462 * succeeding in the unclustered
5463 * allocation. */
5464 if (loop >= LOOP_NO_EMPTY_SIZE &&
5465 last_ptr->block_group != block_group) {
062c05c4
AO
5466 spin_unlock(&last_ptr->refill_lock);
5467 goto unclustered_alloc;
5468 }
5469
fa9c0d79
CM
5470 /*
5471 * this cluster didn't work out, free it and
5472 * start over
5473 */
5474 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5475
a5f6f719
AO
5476 if (loop >= LOOP_NO_EMPTY_SIZE) {
5477 spin_unlock(&last_ptr->refill_lock);
5478 goto unclustered_alloc;
5479 }
5480
fa9c0d79 5481 /* allocate a cluster in this block group */
451d7585 5482 ret = btrfs_find_space_cluster(trans, root,
fa9c0d79 5483 block_group, last_ptr,
1b22bad7 5484 search_start, num_bytes,
fa9c0d79
CM
5485 empty_cluster + empty_size);
5486 if (ret == 0) {
5487 /*
5488 * now pull our allocation out of this
5489 * cluster
5490 */
5491 offset = btrfs_alloc_from_cluster(block_group,
5492 last_ptr, num_bytes,
5493 search_start);
5494 if (offset) {
5495 /* we found one, proceed */
5496 spin_unlock(&last_ptr->refill_lock);
3f7de037
JB
5497 trace_btrfs_reserve_extent_cluster(root,
5498 block_group, search_start,
5499 num_bytes);
fa9c0d79
CM
5500 goto checks;
5501 }
0a24325e
JB
5502 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5503 && !failed_cluster_refill) {
817d52f8
JB
5504 spin_unlock(&last_ptr->refill_lock);
5505
0a24325e 5506 failed_cluster_refill = true;
817d52f8
JB
5507 wait_block_group_cache_progress(block_group,
5508 num_bytes + empty_cluster + empty_size);
5509 goto have_block_group;
fa9c0d79 5510 }
817d52f8 5511
fa9c0d79
CM
5512 /*
5513 * at this point we either didn't find a cluster
5514 * or we weren't able to allocate a block from our
5515 * cluster. Free the cluster we've been trying
5516 * to use, and go to the next block group
5517 */
0a24325e 5518 btrfs_return_cluster_to_free_space(NULL, last_ptr);
fa9c0d79 5519 spin_unlock(&last_ptr->refill_lock);
0a24325e 5520 goto loop;
fa9c0d79
CM
5521 }
5522
062c05c4 5523unclustered_alloc:
a5f6f719
AO
5524 spin_lock(&block_group->free_space_ctl->tree_lock);
5525 if (cached &&
5526 block_group->free_space_ctl->free_space <
5527 num_bytes + empty_cluster + empty_size) {
5528 spin_unlock(&block_group->free_space_ctl->tree_lock);
5529 goto loop;
5530 }
5531 spin_unlock(&block_group->free_space_ctl->tree_lock);
5532
6226cb0a
JB
5533 offset = btrfs_find_space_for_alloc(block_group, search_start,
5534 num_bytes, empty_size);
1cdda9b8
JB
5535 /*
5536 * If we didn't find a chunk, and we haven't failed on this
5537 * block group before, and this block group is in the middle of
5538 * caching and we are ok with waiting, then go ahead and wait
5539 * for progress to be made, and set failed_alloc to true.
5540 *
5541 * If failed_alloc is true then we've already waited on this
5542 * block group once and should move on to the next block group.
5543 */
5544 if (!offset && !failed_alloc && !cached &&
5545 loop > LOOP_CACHING_NOWAIT) {
817d52f8 5546 wait_block_group_cache_progress(block_group,
1cdda9b8
JB
5547 num_bytes + empty_size);
5548 failed_alloc = true;
817d52f8 5549 goto have_block_group;
1cdda9b8 5550 } else if (!offset) {
60d2adbb
MX
5551 if (!cached)
5552 have_caching_bg = true;
1cdda9b8 5553 goto loop;
817d52f8 5554 }
fa9c0d79 5555checks:
6226cb0a 5556 search_start = stripe_align(root, offset);
2552d17e 5557 /* move on to the next group */
6226cb0a 5558 if (search_start + num_bytes >= search_end) {
274bd4fb 5559 btrfs_add_free_space(used_block_group, offset, num_bytes);
2552d17e 5560 goto loop;
6226cb0a 5561 }
25179201 5562
2552d17e
JB
5563 /* move on to the next group */
5564 if (search_start + num_bytes >
274bd4fb
AO
5565 used_block_group->key.objectid + used_block_group->key.offset) {
5566 btrfs_add_free_space(used_block_group, offset, num_bytes);
2552d17e 5567 goto loop;
6226cb0a 5568 }
f5a31e16 5569
f0486c68 5570 if (offset < search_start)
274bd4fb 5571 btrfs_add_free_space(used_block_group, offset,
f0486c68
YZ
5572 search_start - offset);
5573 BUG_ON(offset > search_start);
2552d17e 5574
274bd4fb 5575 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
fb25e914 5576 alloc_type);
f0486c68 5577 if (ret == -EAGAIN) {
274bd4fb 5578 btrfs_add_free_space(used_block_group, offset, num_bytes);
2552d17e 5579 goto loop;
0f9dd46c 5580 }
0b86a832 5581
f0486c68 5582 /* we are all good, lets return */
2552d17e
JB
5583 ins->objectid = search_start;
5584 ins->offset = num_bytes;
d2fb3437 5585
3f7de037
JB
5586 trace_btrfs_reserve_extent(orig_root, block_group,
5587 search_start, num_bytes);
6226cb0a 5588 if (offset < search_start)
274bd4fb 5589 btrfs_add_free_space(used_block_group, offset,
6226cb0a
JB
5590 search_start - offset);
5591 BUG_ON(offset > search_start);
274bd4fb
AO
5592 if (used_block_group != block_group)
5593 btrfs_put_block_group(used_block_group);
d82a6f1d 5594 btrfs_put_block_group(block_group);
2552d17e
JB
5595 break;
5596loop:
0a24325e 5597 failed_cluster_refill = false;
1cdda9b8 5598 failed_alloc = false;
b742bb82 5599 BUG_ON(index != get_block_group_index(block_group));
274bd4fb
AO
5600 if (used_block_group != block_group)
5601 btrfs_put_block_group(used_block_group);
fa9c0d79 5602 btrfs_put_block_group(block_group);
2552d17e
JB
5603 }
5604 up_read(&space_info->groups_sem);
5605
60d2adbb
MX
5606 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5607 goto search;
5608
b742bb82
YZ
5609 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5610 goto search;
5611
ccf0e725
JB
5612 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
5613 * for them to make caching progress. Also
5614 * determine the best possible bg to cache
5615 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5616 * caching kthreads as we move along
817d52f8
JB
5617 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5618 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5619 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5620 * again
fa9c0d79 5621 */
723bda20 5622 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
b742bb82 5623 index = 0;
ccf0e725 5624 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
817d52f8 5625 found_uncached_bg = false;
ccf0e725 5626 loop++;
bab39bf9 5627 if (!ideal_cache_percent)
817d52f8 5628 goto search;
ccf0e725
JB
5629
5630 /*
5631 * 1 of the following 2 things have happened so far
5632 *
5633 * 1) We found an ideal block group for caching that
5634 * is mostly full and will cache quickly, so we might
5635 * as well wait for it.
5636 *
5637 * 2) We searched for cached only and we didn't find
5638 * anything, and we didn't start any caching kthreads
5639 * either, so chances are we will loop through and
5640 * start a couple caching kthreads, and then come back
5641 * around and just wait for them. This will be slower
5642 * because we will have 2 caching kthreads reading at
5643 * the same time when we could have just started one
5644 * and waited for it to get far enough to give us an
5645 * allocation, so go ahead and go to the wait caching
5646 * loop.
5647 */
5648 loop = LOOP_CACHING_WAIT;
5649 search_start = ideal_cache_offset;
5650 ideal_cache_percent = 0;
5651 goto ideal_cache;
5652 } else if (loop == LOOP_FIND_IDEAL) {
5653 /*
5654 * Didn't find a uncached bg, wait on anything we find
5655 * next.
5656 */
5657 loop = LOOP_CACHING_WAIT;
5658 goto search;
5659 }
5660
723bda20 5661 loop++;
817d52f8
JB
5662
5663 if (loop == LOOP_ALLOC_CHUNK) {
723bda20
JB
5664 if (allowed_chunk_alloc) {
5665 ret = do_chunk_alloc(trans, root, num_bytes +
5666 2 * 1024 * 1024, data,
5667 CHUNK_ALLOC_LIMITED);
5668 allowed_chunk_alloc = 0;
5669 if (ret == 1)
5670 done_chunk_alloc = 1;
5671 } else if (!done_chunk_alloc &&
5672 space_info->force_alloc ==
5673 CHUNK_ALLOC_NO_FORCE) {
5674 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5675 }
2552d17e 5676
723bda20
JB
5677 /*
5678 * We didn't allocate a chunk, go ahead and drop the
5679 * empty size and loop again.
5680 */
5681 if (!done_chunk_alloc)
5682 loop = LOOP_NO_EMPTY_SIZE;
2552d17e
JB
5683 }
5684
723bda20
JB
5685 if (loop == LOOP_NO_EMPTY_SIZE) {
5686 empty_size = 0;
5687 empty_cluster = 0;
fa9c0d79 5688 }
723bda20
JB
5689
5690 goto search;
2552d17e
JB
5691 } else if (!ins->objectid) {
5692 ret = -ENOSPC;
d82a6f1d 5693 } else if (ins->objectid) {
80eb234a 5694 ret = 0;
be744175 5695 }
be744175 5696
0f70abe2 5697 return ret;
fec577fb 5698}
ec44a35c 5699
9ed74f2d
JB
5700static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5701 int dump_block_groups)
0f9dd46c
JB
5702{
5703 struct btrfs_block_group_cache *cache;
b742bb82 5704 int index = 0;
0f9dd46c 5705
9ed74f2d 5706 spin_lock(&info->lock);
fb25e914
JB
5707 printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5708 (unsigned long long)info->flags,
d397712b 5709 (unsigned long long)(info->total_bytes - info->bytes_used -
9ed74f2d 5710 info->bytes_pinned - info->bytes_reserved -
8929ecfa 5711 info->bytes_readonly),
d397712b 5712 (info->full) ? "" : "not ");
8929ecfa
YZ
5713 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5714 "reserved=%llu, may_use=%llu, readonly=%llu\n",
21380931 5715 (unsigned long long)info->total_bytes,
8929ecfa 5716 (unsigned long long)info->bytes_used,
21380931 5717 (unsigned long long)info->bytes_pinned,
8929ecfa 5718 (unsigned long long)info->bytes_reserved,
21380931 5719 (unsigned long long)info->bytes_may_use,
8929ecfa 5720 (unsigned long long)info->bytes_readonly);
9ed74f2d
JB
5721 spin_unlock(&info->lock);
5722
5723 if (!dump_block_groups)
5724 return;
0f9dd46c 5725
80eb234a 5726 down_read(&info->groups_sem);
b742bb82
YZ
5727again:
5728 list_for_each_entry(cache, &info->block_groups[index], list) {
0f9dd46c 5729 spin_lock(&cache->lock);
d397712b
CM
5730 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5731 "%llu pinned %llu reserved\n",
5732 (unsigned long long)cache->key.objectid,
5733 (unsigned long long)cache->key.offset,
5734 (unsigned long long)btrfs_block_group_used(&cache->item),
5735 (unsigned long long)cache->pinned,
5736 (unsigned long long)cache->reserved);
0f9dd46c
JB
5737 btrfs_dump_free_space(cache, bytes);
5738 spin_unlock(&cache->lock);
5739 }
b742bb82
YZ
5740 if (++index < BTRFS_NR_RAID_TYPES)
5741 goto again;
80eb234a 5742 up_read(&info->groups_sem);
0f9dd46c 5743}
e8569813 5744
11833d66
YZ
5745int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5746 struct btrfs_root *root,
5747 u64 num_bytes, u64 min_alloc_size,
5748 u64 empty_size, u64 hint_byte,
5749 u64 search_end, struct btrfs_key *ins,
5750 u64 data)
fec577fb
CM
5751{
5752 int ret;
fbdc762b 5753 u64 search_start = 0;
925baedd 5754
6a63209f 5755 data = btrfs_get_alloc_profile(root, data);
98d20f67 5756again:
0ef3e66b
CM
5757 /*
5758 * the only place that sets empty_size is btrfs_realloc_node, which
5759 * is not called recursively on allocations
5760 */
83d3c969 5761 if (empty_size || root->ref_cows)
6324fbf3 5762 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0e4f8f88
CM
5763 num_bytes + 2 * 1024 * 1024, data,
5764 CHUNK_ALLOC_NO_FORCE);
0b86a832 5765
db94535d
CM
5766 WARN_ON(num_bytes < root->sectorsize);
5767 ret = find_free_extent(trans, root, num_bytes, empty_size,
f0486c68
YZ
5768 search_start, search_end, hint_byte,
5769 ins, data);
3b951516 5770
98d20f67
CM
5771 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5772 num_bytes = num_bytes >> 1;
0f9dd46c 5773 num_bytes = num_bytes & ~(root->sectorsize - 1);
98d20f67 5774 num_bytes = max(num_bytes, min_alloc_size);
0ef3e66b 5775 do_chunk_alloc(trans, root->fs_info->extent_root,
0e4f8f88 5776 num_bytes, data, CHUNK_ALLOC_FORCE);
98d20f67
CM
5777 goto again;
5778 }
91435650 5779 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
0f9dd46c
JB
5780 struct btrfs_space_info *sinfo;
5781
5782 sinfo = __find_space_info(root->fs_info, data);
d397712b
CM
5783 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5784 "wanted %llu\n", (unsigned long long)data,
5785 (unsigned long long)num_bytes);
9ed74f2d 5786 dump_space_info(sinfo, num_bytes, 1);
925baedd 5787 }
0f9dd46c 5788
1abe9b8a 5789 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5790
0f9dd46c 5791 return ret;
e6dcd2dc
CM
5792}
5793
e688b725
CM
5794static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5795 u64 start, u64 len, int pin)
65b51a00 5796{
0f9dd46c 5797 struct btrfs_block_group_cache *cache;
1f3c79a2 5798 int ret = 0;
0f9dd46c 5799
0f9dd46c
JB
5800 cache = btrfs_lookup_block_group(root->fs_info, start);
5801 if (!cache) {
d397712b
CM
5802 printk(KERN_ERR "Unable to find block group for %llu\n",
5803 (unsigned long long)start);
0f9dd46c
JB
5804 return -ENOSPC;
5805 }
1f3c79a2 5806
5378e607
LD
5807 if (btrfs_test_opt(root, DISCARD))
5808 ret = btrfs_discard_extent(root, start, len, NULL);
1f3c79a2 5809
e688b725
CM
5810 if (pin)
5811 pin_down_extent(root, cache, start, len, 1);
5812 else {
5813 btrfs_add_free_space(cache, start, len);
5814 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5815 }
fa9c0d79 5816 btrfs_put_block_group(cache);
817d52f8 5817
1abe9b8a 5818 trace_btrfs_reserved_extent_free(root, start, len);
5819
e6dcd2dc
CM
5820 return ret;
5821}
5822
e688b725
CM
5823int btrfs_free_reserved_extent(struct btrfs_root *root,
5824 u64 start, u64 len)
5825{
5826 return __btrfs_free_reserved_extent(root, start, len, 0);
5827}
5828
5829int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
5830 u64 start, u64 len)
5831{
5832 return __btrfs_free_reserved_extent(root, start, len, 1);
5833}
5834
5d4f98a2
YZ
5835static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5836 struct btrfs_root *root,
5837 u64 parent, u64 root_objectid,
5838 u64 flags, u64 owner, u64 offset,
5839 struct btrfs_key *ins, int ref_mod)
e6dcd2dc
CM
5840{
5841 int ret;
5d4f98a2 5842 struct btrfs_fs_info *fs_info = root->fs_info;
e6dcd2dc 5843 struct btrfs_extent_item *extent_item;
5d4f98a2 5844 struct btrfs_extent_inline_ref *iref;
e6dcd2dc 5845 struct btrfs_path *path;
5d4f98a2
YZ
5846 struct extent_buffer *leaf;
5847 int type;
5848 u32 size;
26b8003f 5849
5d4f98a2
YZ
5850 if (parent > 0)
5851 type = BTRFS_SHARED_DATA_REF_KEY;
5852 else
5853 type = BTRFS_EXTENT_DATA_REF_KEY;
58176a96 5854
5d4f98a2 5855 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7bb86316
CM
5856
5857 path = btrfs_alloc_path();
db5b493a
TI
5858 if (!path)
5859 return -ENOMEM;
47e4bb98 5860
b9473439 5861 path->leave_spinning = 1;
5d4f98a2
YZ
5862 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5863 ins, size);
ccd467d6 5864 BUG_ON(ret);
0f9dd46c 5865
5d4f98a2
YZ
5866 leaf = path->nodes[0];
5867 extent_item = btrfs_item_ptr(leaf, path->slots[0],
47e4bb98 5868 struct btrfs_extent_item);
5d4f98a2
YZ
5869 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5870 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5871 btrfs_set_extent_flags(leaf, extent_item,
5872 flags | BTRFS_EXTENT_FLAG_DATA);
5873
5874 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5875 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5876 if (parent > 0) {
5877 struct btrfs_shared_data_ref *ref;
5878 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5879 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5880 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5881 } else {
5882 struct btrfs_extent_data_ref *ref;
5883 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5884 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5885 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5886 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5887 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5888 }
47e4bb98
CM
5889
5890 btrfs_mark_buffer_dirty(path->nodes[0]);
7bb86316 5891 btrfs_free_path(path);
f510cfec 5892
f0486c68 5893 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
f5947066 5894 if (ret) {
d397712b
CM
5895 printk(KERN_ERR "btrfs update block group failed for %llu "
5896 "%llu\n", (unsigned long long)ins->objectid,
5897 (unsigned long long)ins->offset);
f5947066
CM
5898 BUG();
5899 }
e6dcd2dc
CM
5900 return ret;
5901}
5902
5d4f98a2
YZ
5903static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5904 struct btrfs_root *root,
5905 u64 parent, u64 root_objectid,
5906 u64 flags, struct btrfs_disk_key *key,
5907 int level, struct btrfs_key *ins)
e6dcd2dc
CM
5908{
5909 int ret;
5d4f98a2
YZ
5910 struct btrfs_fs_info *fs_info = root->fs_info;
5911 struct btrfs_extent_item *extent_item;
5912 struct btrfs_tree_block_info *block_info;
5913 struct btrfs_extent_inline_ref *iref;
5914 struct btrfs_path *path;
5915 struct extent_buffer *leaf;
5916 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
1c2308f8 5917
5d4f98a2 5918 path = btrfs_alloc_path();
d8926bb3
MF
5919 if (!path)
5920 return -ENOMEM;
56bec294 5921
5d4f98a2
YZ
5922 path->leave_spinning = 1;
5923 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5924 ins, size);
56bec294 5925 BUG_ON(ret);
5d4f98a2
YZ
5926
5927 leaf = path->nodes[0];
5928 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5929 struct btrfs_extent_item);
5930 btrfs_set_extent_refs(leaf, extent_item, 1);
5931 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5932 btrfs_set_extent_flags(leaf, extent_item,
5933 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5934 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5935
5936 btrfs_set_tree_block_key(leaf, block_info, key);
5937 btrfs_set_tree_block_level(leaf, block_info, level);
5938
5939 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5940 if (parent > 0) {
5941 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5942 btrfs_set_extent_inline_ref_type(leaf, iref,
5943 BTRFS_SHARED_BLOCK_REF_KEY);
5944 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5945 } else {
5946 btrfs_set_extent_inline_ref_type(leaf, iref,
5947 BTRFS_TREE_BLOCK_REF_KEY);
5948 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5949 }
5950
5951 btrfs_mark_buffer_dirty(leaf);
5952 btrfs_free_path(path);
5953
f0486c68 5954 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5d4f98a2
YZ
5955 if (ret) {
5956 printk(KERN_ERR "btrfs update block group failed for %llu "
5957 "%llu\n", (unsigned long long)ins->objectid,
5958 (unsigned long long)ins->offset);
5959 BUG();
5960 }
5961 return ret;
5962}
5963
5964int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5965 struct btrfs_root *root,
5966 u64 root_objectid, u64 owner,
5967 u64 offset, struct btrfs_key *ins)
5968{
5969 int ret;
5970
5971 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5972
66d7e7f0
AJ
5973 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
5974 ins->offset, 0,
5975 root_objectid, owner, offset,
5976 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
e6dcd2dc
CM
5977 return ret;
5978}
e02119d5
CM
5979
5980/*
5981 * this is used by the tree logging recovery code. It records that
5982 * an extent has been allocated and makes sure to clear the free
5983 * space cache bits as well
5984 */
5d4f98a2
YZ
5985int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5986 struct btrfs_root *root,
5987 u64 root_objectid, u64 owner, u64 offset,
5988 struct btrfs_key *ins)
e02119d5
CM
5989{
5990 int ret;
5991 struct btrfs_block_group_cache *block_group;
11833d66
YZ
5992 struct btrfs_caching_control *caching_ctl;
5993 u64 start = ins->objectid;
5994 u64 num_bytes = ins->offset;
e02119d5 5995
e02119d5 5996 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
b8399dee 5997 cache_block_group(block_group, trans, NULL, 0);
11833d66 5998 caching_ctl = get_caching_control(block_group);
e02119d5 5999
11833d66
YZ
6000 if (!caching_ctl) {
6001 BUG_ON(!block_group_cache_done(block_group));
6002 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6003 BUG_ON(ret);
6004 } else {
6005 mutex_lock(&caching_ctl->mutex);
6006
6007 if (start >= caching_ctl->progress) {
6008 ret = add_excluded_extent(root, start, num_bytes);
6009 BUG_ON(ret);
6010 } else if (start + num_bytes <= caching_ctl->progress) {
6011 ret = btrfs_remove_free_space(block_group,
6012 start, num_bytes);
6013 BUG_ON(ret);
6014 } else {
6015 num_bytes = caching_ctl->progress - start;
6016 ret = btrfs_remove_free_space(block_group,
6017 start, num_bytes);
6018 BUG_ON(ret);
6019
6020 start = caching_ctl->progress;
6021 num_bytes = ins->objectid + ins->offset -
6022 caching_ctl->progress;
6023 ret = add_excluded_extent(root, start, num_bytes);
6024 BUG_ON(ret);
6025 }
6026
6027 mutex_unlock(&caching_ctl->mutex);
6028 put_caching_control(caching_ctl);
6029 }
6030
fb25e914
JB
6031 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6032 RESERVE_ALLOC_NO_ACCOUNT);
f0486c68 6033 BUG_ON(ret);
fa9c0d79 6034 btrfs_put_block_group(block_group);
5d4f98a2
YZ
6035 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6036 0, owner, offset, ins, 1);
e02119d5
CM
6037 return ret;
6038}
6039
65b51a00
CM
6040struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6041 struct btrfs_root *root,
4008c04a
CM
6042 u64 bytenr, u32 blocksize,
6043 int level)
65b51a00
CM
6044{
6045 struct extent_buffer *buf;
6046
6047 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6048 if (!buf)
6049 return ERR_PTR(-ENOMEM);
6050 btrfs_set_header_generation(buf, trans->transid);
85d4e461 6051 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
65b51a00
CM
6052 btrfs_tree_lock(buf);
6053 clean_tree_block(trans, root, buf);
b4ce94de
CM
6054
6055 btrfs_set_lock_blocking(buf);
65b51a00 6056 btrfs_set_buffer_uptodate(buf);
b4ce94de 6057
d0c803c4 6058 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8cef4e16
YZ
6059 /*
6060 * we allow two log transactions at a time, use different
6061 * EXENT bit to differentiate dirty pages.
6062 */
6063 if (root->log_transid % 2 == 0)
6064 set_extent_dirty(&root->dirty_log_pages, buf->start,
6065 buf->start + buf->len - 1, GFP_NOFS);
6066 else
6067 set_extent_new(&root->dirty_log_pages, buf->start,
6068 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4
CM
6069 } else {
6070 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
65b51a00 6071 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4 6072 }
65b51a00 6073 trans->blocks_used++;
b4ce94de 6074 /* this returns a buffer locked for blocking */
65b51a00
CM
6075 return buf;
6076}
6077
f0486c68
YZ
6078static struct btrfs_block_rsv *
6079use_block_rsv(struct btrfs_trans_handle *trans,
6080 struct btrfs_root *root, u32 blocksize)
6081{
6082 struct btrfs_block_rsv *block_rsv;
68a82277 6083 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
f0486c68
YZ
6084 int ret;
6085
6086 block_rsv = get_block_rsv(trans, root);
6087
6088 if (block_rsv->size == 0) {
36ba022a 6089 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
68a82277
JB
6090 /*
6091 * If we couldn't reserve metadata bytes try and use some from
6092 * the global reserve.
6093 */
6094 if (ret && block_rsv != global_rsv) {
6095 ret = block_rsv_use_bytes(global_rsv, blocksize);
6096 if (!ret)
6097 return global_rsv;
f0486c68 6098 return ERR_PTR(ret);
68a82277 6099 } else if (ret) {
f0486c68 6100 return ERR_PTR(ret);
68a82277 6101 }
f0486c68
YZ
6102 return block_rsv;
6103 }
6104
6105 ret = block_rsv_use_bytes(block_rsv, blocksize);
6106 if (!ret)
6107 return block_rsv;
68a82277 6108 if (ret) {
dff51cd1
DS
6109 static DEFINE_RATELIMIT_STATE(_rs,
6110 DEFAULT_RATELIMIT_INTERVAL,
6111 /*DEFAULT_RATELIMIT_BURST*/ 2);
6112 if (__ratelimit(&_rs)) {
6113 printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6114 WARN_ON(1);
6115 }
36ba022a 6116 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
68a82277 6117 if (!ret) {
68a82277
JB
6118 return block_rsv;
6119 } else if (ret && block_rsv != global_rsv) {
6120 ret = block_rsv_use_bytes(global_rsv, blocksize);
6121 if (!ret)
6122 return global_rsv;
6123 }
6124 }
f0486c68 6125
f0486c68
YZ
6126 return ERR_PTR(-ENOSPC);
6127}
6128
6129static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
6130{
6131 block_rsv_add_bytes(block_rsv, blocksize, 0);
6132 block_rsv_release_bytes(block_rsv, NULL, 0);
6133}
6134
fec577fb 6135/*
f0486c68
YZ
6136 * finds a free extent and does all the dirty work required for allocation
6137 * returns the key for the extent through ins, and a tree buffer for
6138 * the first block of the extent through buf.
6139 *
fec577fb
CM
6140 * returns the tree buffer or NULL.
6141 */
5f39d397 6142struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
6143 struct btrfs_root *root, u32 blocksize,
6144 u64 parent, u64 root_objectid,
6145 struct btrfs_disk_key *key, int level,
66d7e7f0 6146 u64 hint, u64 empty_size, int for_cow)
fec577fb 6147{
e2fa7227 6148 struct btrfs_key ins;
f0486c68 6149 struct btrfs_block_rsv *block_rsv;
5f39d397 6150 struct extent_buffer *buf;
f0486c68
YZ
6151 u64 flags = 0;
6152 int ret;
6153
fec577fb 6154
f0486c68
YZ
6155 block_rsv = use_block_rsv(trans, root, blocksize);
6156 if (IS_ERR(block_rsv))
6157 return ERR_CAST(block_rsv);
6158
6159 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6160 empty_size, hint, (u64)-1, &ins, 0);
fec577fb 6161 if (ret) {
f0486c68 6162 unuse_block_rsv(block_rsv, blocksize);
54aa1f4d 6163 return ERR_PTR(ret);
fec577fb 6164 }
55c69072 6165
4008c04a
CM
6166 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6167 blocksize, level);
f0486c68
YZ
6168 BUG_ON(IS_ERR(buf));
6169
6170 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6171 if (parent == 0)
6172 parent = ins.objectid;
6173 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6174 } else
6175 BUG_ON(parent > 0);
6176
6177 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6178 struct btrfs_delayed_extent_op *extent_op;
6179 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6180 BUG_ON(!extent_op);
6181 if (key)
6182 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6183 else
6184 memset(&extent_op->key, 0, sizeof(extent_op->key));
6185 extent_op->flags_to_set = flags;
6186 extent_op->update_key = 1;
6187 extent_op->update_flags = 1;
6188 extent_op->is_data = 0;
6189
66d7e7f0
AJ
6190 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6191 ins.objectid,
f0486c68
YZ
6192 ins.offset, parent, root_objectid,
6193 level, BTRFS_ADD_DELAYED_EXTENT,
66d7e7f0 6194 extent_op, for_cow);
f0486c68
YZ
6195 BUG_ON(ret);
6196 }
fec577fb
CM
6197 return buf;
6198}
a28ec197 6199
2c47e605
YZ
6200struct walk_control {
6201 u64 refs[BTRFS_MAX_LEVEL];
6202 u64 flags[BTRFS_MAX_LEVEL];
6203 struct btrfs_key update_progress;
6204 int stage;
6205 int level;
6206 int shared_level;
6207 int update_ref;
6208 int keep_locks;
1c4850e2
YZ
6209 int reada_slot;
6210 int reada_count;
66d7e7f0 6211 int for_reloc;
2c47e605
YZ
6212};
6213
6214#define DROP_REFERENCE 1
6215#define UPDATE_BACKREF 2
6216
1c4850e2
YZ
6217static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6218 struct btrfs_root *root,
6219 struct walk_control *wc,
6220 struct btrfs_path *path)
6407bf6d 6221{
1c4850e2
YZ
6222 u64 bytenr;
6223 u64 generation;
6224 u64 refs;
94fcca9f 6225 u64 flags;
5d4f98a2 6226 u32 nritems;
1c4850e2
YZ
6227 u32 blocksize;
6228 struct btrfs_key key;
6229 struct extent_buffer *eb;
6407bf6d 6230 int ret;
1c4850e2
YZ
6231 int slot;
6232 int nread = 0;
6407bf6d 6233
1c4850e2
YZ
6234 if (path->slots[wc->level] < wc->reada_slot) {
6235 wc->reada_count = wc->reada_count * 2 / 3;
6236 wc->reada_count = max(wc->reada_count, 2);
6237 } else {
6238 wc->reada_count = wc->reada_count * 3 / 2;
6239 wc->reada_count = min_t(int, wc->reada_count,
6240 BTRFS_NODEPTRS_PER_BLOCK(root));
6241 }
7bb86316 6242
1c4850e2
YZ
6243 eb = path->nodes[wc->level];
6244 nritems = btrfs_header_nritems(eb);
6245 blocksize = btrfs_level_size(root, wc->level - 1);
bd56b302 6246
1c4850e2
YZ
6247 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6248 if (nread >= wc->reada_count)
6249 break;
bd56b302 6250
2dd3e67b 6251 cond_resched();
1c4850e2
YZ
6252 bytenr = btrfs_node_blockptr(eb, slot);
6253 generation = btrfs_node_ptr_generation(eb, slot);
2dd3e67b 6254
1c4850e2
YZ
6255 if (slot == path->slots[wc->level])
6256 goto reada;
5d4f98a2 6257
1c4850e2
YZ
6258 if (wc->stage == UPDATE_BACKREF &&
6259 generation <= root->root_key.offset)
bd56b302
CM
6260 continue;
6261
94fcca9f
YZ
6262 /* We don't lock the tree block, it's OK to be racy here */
6263 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6264 &refs, &flags);
6265 BUG_ON(ret);
6266 BUG_ON(refs == 0);
6267
1c4850e2 6268 if (wc->stage == DROP_REFERENCE) {
1c4850e2
YZ
6269 if (refs == 1)
6270 goto reada;
bd56b302 6271
94fcca9f
YZ
6272 if (wc->level == 1 &&
6273 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6274 continue;
1c4850e2
YZ
6275 if (!wc->update_ref ||
6276 generation <= root->root_key.offset)
6277 continue;
6278 btrfs_node_key_to_cpu(eb, &key, slot);
6279 ret = btrfs_comp_cpu_keys(&key,
6280 &wc->update_progress);
6281 if (ret < 0)
6282 continue;
94fcca9f
YZ
6283 } else {
6284 if (wc->level == 1 &&
6285 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6286 continue;
6407bf6d 6287 }
1c4850e2
YZ
6288reada:
6289 ret = readahead_tree_block(root, bytenr, blocksize,
6290 generation);
6291 if (ret)
bd56b302 6292 break;
1c4850e2 6293 nread++;
20524f02 6294 }
1c4850e2 6295 wc->reada_slot = slot;
20524f02 6296}
2c47e605 6297
f82d02d9 6298/*
2c47e605
YZ
6299 * hepler to process tree block while walking down the tree.
6300 *
2c47e605
YZ
6301 * when wc->stage == UPDATE_BACKREF, this function updates
6302 * back refs for pointers in the block.
6303 *
6304 * NOTE: return value 1 means we should stop walking down.
f82d02d9 6305 */
2c47e605 6306static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5d4f98a2 6307 struct btrfs_root *root,
2c47e605 6308 struct btrfs_path *path,
94fcca9f 6309 struct walk_control *wc, int lookup_info)
f82d02d9 6310{
2c47e605
YZ
6311 int level = wc->level;
6312 struct extent_buffer *eb = path->nodes[level];
2c47e605 6313 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
f82d02d9
YZ
6314 int ret;
6315
2c47e605
YZ
6316 if (wc->stage == UPDATE_BACKREF &&
6317 btrfs_header_owner(eb) != root->root_key.objectid)
6318 return 1;
f82d02d9 6319
2c47e605
YZ
6320 /*
6321 * when reference count of tree block is 1, it won't increase
6322 * again. once full backref flag is set, we never clear it.
6323 */
94fcca9f
YZ
6324 if (lookup_info &&
6325 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6326 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
2c47e605
YZ
6327 BUG_ON(!path->locks[level]);
6328 ret = btrfs_lookup_extent_info(trans, root,
6329 eb->start, eb->len,
6330 &wc->refs[level],
6331 &wc->flags[level]);
6332 BUG_ON(ret);
6333 BUG_ON(wc->refs[level] == 0);
6334 }
5d4f98a2 6335
2c47e605
YZ
6336 if (wc->stage == DROP_REFERENCE) {
6337 if (wc->refs[level] > 1)
6338 return 1;
f82d02d9 6339
2c47e605 6340 if (path->locks[level] && !wc->keep_locks) {
bd681513 6341 btrfs_tree_unlock_rw(eb, path->locks[level]);
2c47e605
YZ
6342 path->locks[level] = 0;
6343 }
6344 return 0;
6345 }
f82d02d9 6346
2c47e605
YZ
6347 /* wc->stage == UPDATE_BACKREF */
6348 if (!(wc->flags[level] & flag)) {
6349 BUG_ON(!path->locks[level]);
66d7e7f0 6350 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
f82d02d9 6351 BUG_ON(ret);
66d7e7f0 6352 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
2c47e605
YZ
6353 BUG_ON(ret);
6354 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6355 eb->len, flag, 0);
6356 BUG_ON(ret);
6357 wc->flags[level] |= flag;
6358 }
6359
6360 /*
6361 * the block is shared by multiple trees, so it's not good to
6362 * keep the tree lock
6363 */
6364 if (path->locks[level] && level > 0) {
bd681513 6365 btrfs_tree_unlock_rw(eb, path->locks[level]);
2c47e605
YZ
6366 path->locks[level] = 0;
6367 }
6368 return 0;
6369}
6370
1c4850e2
YZ
6371/*
6372 * hepler to process tree block pointer.
6373 *
6374 * when wc->stage == DROP_REFERENCE, this function checks
6375 * reference count of the block pointed to. if the block
6376 * is shared and we need update back refs for the subtree
6377 * rooted at the block, this function changes wc->stage to
6378 * UPDATE_BACKREF. if the block is shared and there is no
6379 * need to update back, this function drops the reference
6380 * to the block.
6381 *
6382 * NOTE: return value 1 means we should stop walking down.
6383 */
6384static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6385 struct btrfs_root *root,
6386 struct btrfs_path *path,
94fcca9f 6387 struct walk_control *wc, int *lookup_info)
1c4850e2
YZ
6388{
6389 u64 bytenr;
6390 u64 generation;
6391 u64 parent;
6392 u32 blocksize;
6393 struct btrfs_key key;
6394 struct extent_buffer *next;
6395 int level = wc->level;
6396 int reada = 0;
6397 int ret = 0;
6398
6399 generation = btrfs_node_ptr_generation(path->nodes[level],
6400 path->slots[level]);
6401 /*
6402 * if the lower level block was created before the snapshot
6403 * was created, we know there is no need to update back refs
6404 * for the subtree
6405 */
6406 if (wc->stage == UPDATE_BACKREF &&
94fcca9f
YZ
6407 generation <= root->root_key.offset) {
6408 *lookup_info = 1;
1c4850e2 6409 return 1;
94fcca9f 6410 }
1c4850e2
YZ
6411
6412 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6413 blocksize = btrfs_level_size(root, level - 1);
6414
6415 next = btrfs_find_tree_block(root, bytenr, blocksize);
6416 if (!next) {
6417 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
90d2c51d
MX
6418 if (!next)
6419 return -ENOMEM;
1c4850e2
YZ
6420 reada = 1;
6421 }
6422 btrfs_tree_lock(next);
6423 btrfs_set_lock_blocking(next);
6424
94fcca9f
YZ
6425 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6426 &wc->refs[level - 1],
6427 &wc->flags[level - 1]);
6428 BUG_ON(ret);
6429 BUG_ON(wc->refs[level - 1] == 0);
6430 *lookup_info = 0;
1c4850e2 6431
94fcca9f 6432 if (wc->stage == DROP_REFERENCE) {
1c4850e2 6433 if (wc->refs[level - 1] > 1) {
94fcca9f
YZ
6434 if (level == 1 &&
6435 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6436 goto skip;
6437
1c4850e2
YZ
6438 if (!wc->update_ref ||
6439 generation <= root->root_key.offset)
6440 goto skip;
6441
6442 btrfs_node_key_to_cpu(path->nodes[level], &key,
6443 path->slots[level]);
6444 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6445 if (ret < 0)
6446 goto skip;
6447
6448 wc->stage = UPDATE_BACKREF;
6449 wc->shared_level = level - 1;
6450 }
94fcca9f
YZ
6451 } else {
6452 if (level == 1 &&
6453 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6454 goto skip;
1c4850e2
YZ
6455 }
6456
6457 if (!btrfs_buffer_uptodate(next, generation)) {
6458 btrfs_tree_unlock(next);
6459 free_extent_buffer(next);
6460 next = NULL;
94fcca9f 6461 *lookup_info = 1;
1c4850e2
YZ
6462 }
6463
6464 if (!next) {
6465 if (reada && level == 1)
6466 reada_walk_down(trans, root, wc, path);
6467 next = read_tree_block(root, bytenr, blocksize, generation);
97d9a8a4
TI
6468 if (!next)
6469 return -EIO;
1c4850e2
YZ
6470 btrfs_tree_lock(next);
6471 btrfs_set_lock_blocking(next);
6472 }
6473
6474 level--;
6475 BUG_ON(level != btrfs_header_level(next));
6476 path->nodes[level] = next;
6477 path->slots[level] = 0;
bd681513 6478 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
1c4850e2
YZ
6479 wc->level = level;
6480 if (wc->level == 1)
6481 wc->reada_slot = 0;
6482 return 0;
6483skip:
6484 wc->refs[level - 1] = 0;
6485 wc->flags[level - 1] = 0;
94fcca9f
YZ
6486 if (wc->stage == DROP_REFERENCE) {
6487 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6488 parent = path->nodes[level]->start;
6489 } else {
6490 BUG_ON(root->root_key.objectid !=
6491 btrfs_header_owner(path->nodes[level]));
6492 parent = 0;
6493 }
1c4850e2 6494
94fcca9f 6495 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
66d7e7f0 6496 root->root_key.objectid, level - 1, 0, 0);
94fcca9f 6497 BUG_ON(ret);
1c4850e2 6498 }
1c4850e2
YZ
6499 btrfs_tree_unlock(next);
6500 free_extent_buffer(next);
94fcca9f 6501 *lookup_info = 1;
1c4850e2
YZ
6502 return 1;
6503}
6504
2c47e605
YZ
6505/*
6506 * hepler to process tree block while walking up the tree.
6507 *
6508 * when wc->stage == DROP_REFERENCE, this function drops
6509 * reference count on the block.
6510 *
6511 * when wc->stage == UPDATE_BACKREF, this function changes
6512 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6513 * to UPDATE_BACKREF previously while processing the block.
6514 *
6515 * NOTE: return value 1 means we should stop walking up.
6516 */
6517static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6518 struct btrfs_root *root,
6519 struct btrfs_path *path,
6520 struct walk_control *wc)
6521{
f0486c68 6522 int ret;
2c47e605
YZ
6523 int level = wc->level;
6524 struct extent_buffer *eb = path->nodes[level];
6525 u64 parent = 0;
6526
6527 if (wc->stage == UPDATE_BACKREF) {
6528 BUG_ON(wc->shared_level < level);
6529 if (level < wc->shared_level)
6530 goto out;
6531
2c47e605
YZ
6532 ret = find_next_key(path, level + 1, &wc->update_progress);
6533 if (ret > 0)
6534 wc->update_ref = 0;
6535
6536 wc->stage = DROP_REFERENCE;
6537 wc->shared_level = -1;
6538 path->slots[level] = 0;
6539
6540 /*
6541 * check reference count again if the block isn't locked.
6542 * we should start walking down the tree again if reference
6543 * count is one.
6544 */
6545 if (!path->locks[level]) {
6546 BUG_ON(level == 0);
6547 btrfs_tree_lock(eb);
6548 btrfs_set_lock_blocking(eb);
bd681513 6549 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
2c47e605
YZ
6550
6551 ret = btrfs_lookup_extent_info(trans, root,
6552 eb->start, eb->len,
6553 &wc->refs[level],
6554 &wc->flags[level]);
f82d02d9 6555 BUG_ON(ret);
2c47e605
YZ
6556 BUG_ON(wc->refs[level] == 0);
6557 if (wc->refs[level] == 1) {
bd681513 6558 btrfs_tree_unlock_rw(eb, path->locks[level]);
2c47e605
YZ
6559 return 1;
6560 }
f82d02d9 6561 }
2c47e605 6562 }
f82d02d9 6563
2c47e605
YZ
6564 /* wc->stage == DROP_REFERENCE */
6565 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5d4f98a2 6566
2c47e605
YZ
6567 if (wc->refs[level] == 1) {
6568 if (level == 0) {
6569 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
66d7e7f0
AJ
6570 ret = btrfs_dec_ref(trans, root, eb, 1,
6571 wc->for_reloc);
2c47e605 6572 else
66d7e7f0
AJ
6573 ret = btrfs_dec_ref(trans, root, eb, 0,
6574 wc->for_reloc);
2c47e605
YZ
6575 BUG_ON(ret);
6576 }
6577 /* make block locked assertion in clean_tree_block happy */
6578 if (!path->locks[level] &&
6579 btrfs_header_generation(eb) == trans->transid) {
6580 btrfs_tree_lock(eb);
6581 btrfs_set_lock_blocking(eb);
bd681513 6582 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
2c47e605
YZ
6583 }
6584 clean_tree_block(trans, root, eb);
6585 }
6586
6587 if (eb == root->node) {
6588 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6589 parent = eb->start;
6590 else
6591 BUG_ON(root->root_key.objectid !=
6592 btrfs_header_owner(eb));
6593 } else {
6594 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6595 parent = path->nodes[level + 1]->start;
6596 else
6597 BUG_ON(root->root_key.objectid !=
6598 btrfs_header_owner(path->nodes[level + 1]));
f82d02d9 6599 }
f82d02d9 6600
66d7e7f0 6601 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
2c47e605
YZ
6602out:
6603 wc->refs[level] = 0;
6604 wc->flags[level] = 0;
f0486c68 6605 return 0;
2c47e605
YZ
6606}
6607
6608static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6609 struct btrfs_root *root,
6610 struct btrfs_path *path,
6611 struct walk_control *wc)
6612{
2c47e605 6613 int level = wc->level;
94fcca9f 6614 int lookup_info = 1;
2c47e605
YZ
6615 int ret;
6616
6617 while (level >= 0) {
94fcca9f 6618 ret = walk_down_proc(trans, root, path, wc, lookup_info);
2c47e605
YZ
6619 if (ret > 0)
6620 break;
6621
6622 if (level == 0)
6623 break;
6624
7a7965f8
YZ
6625 if (path->slots[level] >=
6626 btrfs_header_nritems(path->nodes[level]))
6627 break;
6628
94fcca9f 6629 ret = do_walk_down(trans, root, path, wc, &lookup_info);
1c4850e2
YZ
6630 if (ret > 0) {
6631 path->slots[level]++;
6632 continue;
90d2c51d
MX
6633 } else if (ret < 0)
6634 return ret;
1c4850e2 6635 level = wc->level;
f82d02d9 6636 }
f82d02d9
YZ
6637 return 0;
6638}
6639
d397712b 6640static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
98ed5174 6641 struct btrfs_root *root,
f82d02d9 6642 struct btrfs_path *path,
2c47e605 6643 struct walk_control *wc, int max_level)
20524f02 6644{
2c47e605 6645 int level = wc->level;
20524f02 6646 int ret;
9f3a7427 6647
2c47e605
YZ
6648 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6649 while (level < max_level && path->nodes[level]) {
6650 wc->level = level;
6651 if (path->slots[level] + 1 <
6652 btrfs_header_nritems(path->nodes[level])) {
6653 path->slots[level]++;
20524f02
CM
6654 return 0;
6655 } else {
2c47e605
YZ
6656 ret = walk_up_proc(trans, root, path, wc);
6657 if (ret > 0)
6658 return 0;
bd56b302 6659
2c47e605 6660 if (path->locks[level]) {
bd681513
CM
6661 btrfs_tree_unlock_rw(path->nodes[level],
6662 path->locks[level]);
2c47e605 6663 path->locks[level] = 0;
f82d02d9 6664 }
2c47e605
YZ
6665 free_extent_buffer(path->nodes[level]);
6666 path->nodes[level] = NULL;
6667 level++;
20524f02
CM
6668 }
6669 }
6670 return 1;
6671}
6672
9aca1d51 6673/*
2c47e605
YZ
6674 * drop a subvolume tree.
6675 *
6676 * this function traverses the tree freeing any blocks that only
6677 * referenced by the tree.
6678 *
6679 * when a shared tree block is found. this function decreases its
6680 * reference count by one. if update_ref is true, this function
6681 * also make sure backrefs for the shared block and all lower level
6682 * blocks are properly updated.
9aca1d51 6683 */
cb1b69f4 6684void btrfs_drop_snapshot(struct btrfs_root *root,
66d7e7f0
AJ
6685 struct btrfs_block_rsv *block_rsv, int update_ref,
6686 int for_reloc)
20524f02 6687{
5caf2a00 6688 struct btrfs_path *path;
2c47e605
YZ
6689 struct btrfs_trans_handle *trans;
6690 struct btrfs_root *tree_root = root->fs_info->tree_root;
9f3a7427 6691 struct btrfs_root_item *root_item = &root->root_item;
2c47e605
YZ
6692 struct walk_control *wc;
6693 struct btrfs_key key;
6694 int err = 0;
6695 int ret;
6696 int level;
20524f02 6697
5caf2a00 6698 path = btrfs_alloc_path();
cb1b69f4
TI
6699 if (!path) {
6700 err = -ENOMEM;
6701 goto out;
6702 }
20524f02 6703
2c47e605 6704 wc = kzalloc(sizeof(*wc), GFP_NOFS);
38a1a919
MF
6705 if (!wc) {
6706 btrfs_free_path(path);
cb1b69f4
TI
6707 err = -ENOMEM;
6708 goto out;
38a1a919 6709 }
2c47e605 6710
a22285a6 6711 trans = btrfs_start_transaction(tree_root, 0);
98d5dc13
TI
6712 BUG_ON(IS_ERR(trans));
6713
3fd0a558
YZ
6714 if (block_rsv)
6715 trans->block_rsv = block_rsv;
2c47e605 6716
9f3a7427 6717 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2c47e605 6718 level = btrfs_header_level(root->node);
5d4f98a2
YZ
6719 path->nodes[level] = btrfs_lock_root_node(root);
6720 btrfs_set_lock_blocking(path->nodes[level]);
9f3a7427 6721 path->slots[level] = 0;
bd681513 6722 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
2c47e605
YZ
6723 memset(&wc->update_progress, 0,
6724 sizeof(wc->update_progress));
9f3a7427 6725 } else {
9f3a7427 6726 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2c47e605
YZ
6727 memcpy(&wc->update_progress, &key,
6728 sizeof(wc->update_progress));
6729
6702ed49 6730 level = root_item->drop_level;
2c47e605 6731 BUG_ON(level == 0);
6702ed49 6732 path->lowest_level = level;
2c47e605
YZ
6733 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6734 path->lowest_level = 0;
6735 if (ret < 0) {
6736 err = ret;
cb1b69f4 6737 goto out_free;
9f3a7427 6738 }
1c4850e2 6739 WARN_ON(ret > 0);
2c47e605 6740
7d9eb12c
CM
6741 /*
6742 * unlock our path, this is safe because only this
6743 * function is allowed to delete this snapshot
6744 */
5d4f98a2 6745 btrfs_unlock_up_safe(path, 0);
2c47e605
YZ
6746
6747 level = btrfs_header_level(root->node);
6748 while (1) {
6749 btrfs_tree_lock(path->nodes[level]);
6750 btrfs_set_lock_blocking(path->nodes[level]);
6751
6752 ret = btrfs_lookup_extent_info(trans, root,
6753 path->nodes[level]->start,
6754 path->nodes[level]->len,
6755 &wc->refs[level],
6756 &wc->flags[level]);
6757 BUG_ON(ret);
6758 BUG_ON(wc->refs[level] == 0);
6759
6760 if (level == root_item->drop_level)
6761 break;
6762
6763 btrfs_tree_unlock(path->nodes[level]);
6764 WARN_ON(wc->refs[level] != 1);
6765 level--;
6766 }
9f3a7427 6767 }
2c47e605
YZ
6768
6769 wc->level = level;
6770 wc->shared_level = -1;
6771 wc->stage = DROP_REFERENCE;
6772 wc->update_ref = update_ref;
6773 wc->keep_locks = 0;
66d7e7f0 6774 wc->for_reloc = for_reloc;
1c4850e2 6775 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
2c47e605 6776
d397712b 6777 while (1) {
2c47e605
YZ
6778 ret = walk_down_tree(trans, root, path, wc);
6779 if (ret < 0) {
6780 err = ret;
20524f02 6781 break;
2c47e605 6782 }
9aca1d51 6783
2c47e605
YZ
6784 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6785 if (ret < 0) {
6786 err = ret;
20524f02 6787 break;
2c47e605
YZ
6788 }
6789
6790 if (ret > 0) {
6791 BUG_ON(wc->stage != DROP_REFERENCE);
e7a84565
CM
6792 break;
6793 }
2c47e605
YZ
6794
6795 if (wc->stage == DROP_REFERENCE) {
6796 level = wc->level;
6797 btrfs_node_key(path->nodes[level],
6798 &root_item->drop_progress,
6799 path->slots[level]);
6800 root_item->drop_level = level;
6801 }
6802
6803 BUG_ON(wc->level == 0);
3fd0a558 6804 if (btrfs_should_end_transaction(trans, tree_root)) {
2c47e605
YZ
6805 ret = btrfs_update_root(trans, tree_root,
6806 &root->root_key,
6807 root_item);
6808 BUG_ON(ret);
6809
3fd0a558 6810 btrfs_end_transaction_throttle(trans, tree_root);
a22285a6 6811 trans = btrfs_start_transaction(tree_root, 0);
98d5dc13 6812 BUG_ON(IS_ERR(trans));
3fd0a558
YZ
6813 if (block_rsv)
6814 trans->block_rsv = block_rsv;
c3e69d58 6815 }
20524f02 6816 }
b3b4aa74 6817 btrfs_release_path(path);
2c47e605
YZ
6818 BUG_ON(err);
6819
6820 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6821 BUG_ON(ret);
6822
76dda93c
YZ
6823 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6824 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6825 NULL, NULL);
6826 BUG_ON(ret < 0);
6827 if (ret > 0) {
84cd948c
JB
6828 /* if we fail to delete the orphan item this time
6829 * around, it'll get picked up the next time.
6830 *
6831 * The most common failure here is just -ENOENT.
6832 */
6833 btrfs_del_orphan_item(trans, tree_root,
6834 root->root_key.objectid);
76dda93c
YZ
6835 }
6836 }
6837
6838 if (root->in_radix) {
6839 btrfs_free_fs_root(tree_root->fs_info, root);
6840 } else {
6841 free_extent_buffer(root->node);
6842 free_extent_buffer(root->commit_root);
6843 kfree(root);
6844 }
cb1b69f4 6845out_free:
3fd0a558 6846 btrfs_end_transaction_throttle(trans, tree_root);
2c47e605 6847 kfree(wc);
5caf2a00 6848 btrfs_free_path(path);
cb1b69f4
TI
6849out:
6850 if (err)
6851 btrfs_std_error(root->fs_info, err);
6852 return;
20524f02 6853}
9078a3e1 6854
2c47e605
YZ
6855/*
6856 * drop subtree rooted at tree block 'node'.
6857 *
6858 * NOTE: this function will unlock and release tree block 'node'
66d7e7f0 6859 * only used by relocation code
2c47e605 6860 */
f82d02d9
YZ
6861int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6862 struct btrfs_root *root,
6863 struct extent_buffer *node,
6864 struct extent_buffer *parent)
6865{
6866 struct btrfs_path *path;
2c47e605 6867 struct walk_control *wc;
f82d02d9
YZ
6868 int level;
6869 int parent_level;
6870 int ret = 0;
6871 int wret;
6872
2c47e605
YZ
6873 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6874
f82d02d9 6875 path = btrfs_alloc_path();
db5b493a
TI
6876 if (!path)
6877 return -ENOMEM;
f82d02d9 6878
2c47e605 6879 wc = kzalloc(sizeof(*wc), GFP_NOFS);
db5b493a
TI
6880 if (!wc) {
6881 btrfs_free_path(path);
6882 return -ENOMEM;
6883 }
2c47e605 6884
b9447ef8 6885 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
6886 parent_level = btrfs_header_level(parent);
6887 extent_buffer_get(parent);
6888 path->nodes[parent_level] = parent;
6889 path->slots[parent_level] = btrfs_header_nritems(parent);
6890
b9447ef8 6891 btrfs_assert_tree_locked(node);
f82d02d9 6892 level = btrfs_header_level(node);
f82d02d9
YZ
6893 path->nodes[level] = node;
6894 path->slots[level] = 0;
bd681513 6895 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
2c47e605
YZ
6896
6897 wc->refs[parent_level] = 1;
6898 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6899 wc->level = level;
6900 wc->shared_level = -1;
6901 wc->stage = DROP_REFERENCE;
6902 wc->update_ref = 0;
6903 wc->keep_locks = 1;
66d7e7f0 6904 wc->for_reloc = 1;
1c4850e2 6905 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
6906
6907 while (1) {
2c47e605
YZ
6908 wret = walk_down_tree(trans, root, path, wc);
6909 if (wret < 0) {
f82d02d9 6910 ret = wret;
f82d02d9 6911 break;
2c47e605 6912 }
f82d02d9 6913
2c47e605 6914 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
6915 if (wret < 0)
6916 ret = wret;
6917 if (wret != 0)
6918 break;
6919 }
6920
2c47e605 6921 kfree(wc);
f82d02d9
YZ
6922 btrfs_free_path(path);
6923 return ret;
6924}
6925
ec44a35c
CM
6926static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6927{
6928 u64 num_devices;
6929 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6930 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6931
e4d8ec0f
ID
6932 if (root->fs_info->balance_ctl) {
6933 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
6934 u64 tgt = 0;
6935
6936 /* pick restriper's target profile and return */
6937 if (flags & BTRFS_BLOCK_GROUP_DATA &&
6938 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6939 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6940 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6941 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6942 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
6943 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
6944 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6945 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
6946 }
6947
6948 if (tgt) {
6949 /* extended -> chunk profile */
6950 tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
6951 return tgt;
6952 }
6953 }
6954
cd02dca5
CM
6955 /*
6956 * we add in the count of missing devices because we want
6957 * to make sure that any RAID levels on a degraded FS
6958 * continue to be honored.
6959 */
6960 num_devices = root->fs_info->fs_devices->rw_devices +
6961 root->fs_info->fs_devices->missing_devices;
6962
ec44a35c
CM
6963 if (num_devices == 1) {
6964 stripped |= BTRFS_BLOCK_GROUP_DUP;
6965 stripped = flags & ~stripped;
6966
6967 /* turn raid0 into single device chunks */
6968 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6969 return stripped;
6970
6971 /* turn mirroring into duplication */
6972 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6973 BTRFS_BLOCK_GROUP_RAID10))
6974 return stripped | BTRFS_BLOCK_GROUP_DUP;
6975 return flags;
6976 } else {
6977 /* they already had raid on here, just return */
ec44a35c
CM
6978 if (flags & stripped)
6979 return flags;
6980
6981 stripped |= BTRFS_BLOCK_GROUP_DUP;
6982 stripped = flags & ~stripped;
6983
6984 /* switch duplicated blocks with raid1 */
6985 if (flags & BTRFS_BLOCK_GROUP_DUP)
6986 return stripped | BTRFS_BLOCK_GROUP_RAID1;
6987
6988 /* turn single device chunks into raid0 */
6989 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6990 }
6991 return flags;
6992}
6993
199c36ea 6994static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
0ef3e66b 6995{
f0486c68
YZ
6996 struct btrfs_space_info *sinfo = cache->space_info;
6997 u64 num_bytes;
199c36ea 6998 u64 min_allocable_bytes;
f0486c68 6999 int ret = -ENOSPC;
0ef3e66b 7000
c286ac48 7001
199c36ea
MX
7002 /*
7003 * We need some metadata space and system metadata space for
7004 * allocating chunks in some corner cases until we force to set
7005 * it to be readonly.
7006 */
7007 if ((sinfo->flags &
7008 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7009 !force)
7010 min_allocable_bytes = 1 * 1024 * 1024;
7011 else
7012 min_allocable_bytes = 0;
7013
f0486c68
YZ
7014 spin_lock(&sinfo->lock);
7015 spin_lock(&cache->lock);
61cfea9b
W
7016
7017 if (cache->ro) {
7018 ret = 0;
7019 goto out;
7020 }
7021
f0486c68
YZ
7022 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7023 cache->bytes_super - btrfs_block_group_used(&cache->item);
7024
7025 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
37be25bc
JB
7026 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7027 min_allocable_bytes <= sinfo->total_bytes) {
f0486c68 7028 sinfo->bytes_readonly += num_bytes;
f0486c68
YZ
7029 cache->ro = 1;
7030 ret = 0;
7031 }
61cfea9b 7032out:
f0486c68
YZ
7033 spin_unlock(&cache->lock);
7034 spin_unlock(&sinfo->lock);
7035 return ret;
7036}
7d9eb12c 7037
f0486c68
YZ
7038int btrfs_set_block_group_ro(struct btrfs_root *root,
7039 struct btrfs_block_group_cache *cache)
c286ac48 7040
f0486c68
YZ
7041{
7042 struct btrfs_trans_handle *trans;
7043 u64 alloc_flags;
7044 int ret;
7d9eb12c 7045
f0486c68 7046 BUG_ON(cache->ro);
0ef3e66b 7047
ff5714cc 7048 trans = btrfs_join_transaction(root);
f0486c68 7049 BUG_ON(IS_ERR(trans));
5d4f98a2 7050
f0486c68
YZ
7051 alloc_flags = update_block_group_flags(root, cache->flags);
7052 if (alloc_flags != cache->flags)
0e4f8f88
CM
7053 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7054 CHUNK_ALLOC_FORCE);
5d4f98a2 7055
199c36ea 7056 ret = set_block_group_ro(cache, 0);
f0486c68
YZ
7057 if (!ret)
7058 goto out;
7059 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
0e4f8f88
CM
7060 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7061 CHUNK_ALLOC_FORCE);
f0486c68
YZ
7062 if (ret < 0)
7063 goto out;
199c36ea 7064 ret = set_block_group_ro(cache, 0);
f0486c68
YZ
7065out:
7066 btrfs_end_transaction(trans, root);
7067 return ret;
7068}
5d4f98a2 7069
c87f08ca
CM
7070int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7071 struct btrfs_root *root, u64 type)
7072{
7073 u64 alloc_flags = get_alloc_profile(root, type);
0e4f8f88
CM
7074 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7075 CHUNK_ALLOC_FORCE);
c87f08ca
CM
7076}
7077
6d07bcec
MX
7078/*
7079 * helper to account the unused space of all the readonly block group in the
7080 * list. takes mirrors into account.
7081 */
7082static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7083{
7084 struct btrfs_block_group_cache *block_group;
7085 u64 free_bytes = 0;
7086 int factor;
7087
7088 list_for_each_entry(block_group, groups_list, list) {
7089 spin_lock(&block_group->lock);
7090
7091 if (!block_group->ro) {
7092 spin_unlock(&block_group->lock);
7093 continue;
7094 }
7095
7096 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7097 BTRFS_BLOCK_GROUP_RAID10 |
7098 BTRFS_BLOCK_GROUP_DUP))
7099 factor = 2;
7100 else
7101 factor = 1;
7102
7103 free_bytes += (block_group->key.offset -
7104 btrfs_block_group_used(&block_group->item)) *
7105 factor;
7106
7107 spin_unlock(&block_group->lock);
7108 }
7109
7110 return free_bytes;
7111}
7112
7113/*
7114 * helper to account the unused space of all the readonly block group in the
7115 * space_info. takes mirrors into account.
7116 */
7117u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7118{
7119 int i;
7120 u64 free_bytes = 0;
7121
7122 spin_lock(&sinfo->lock);
7123
7124 for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7125 if (!list_empty(&sinfo->block_groups[i]))
7126 free_bytes += __btrfs_get_ro_block_group_free_space(
7127 &sinfo->block_groups[i]);
7128
7129 spin_unlock(&sinfo->lock);
7130
7131 return free_bytes;
7132}
7133
f0486c68
YZ
7134int btrfs_set_block_group_rw(struct btrfs_root *root,
7135 struct btrfs_block_group_cache *cache)
5d4f98a2 7136{
f0486c68
YZ
7137 struct btrfs_space_info *sinfo = cache->space_info;
7138 u64 num_bytes;
7139
7140 BUG_ON(!cache->ro);
7141
7142 spin_lock(&sinfo->lock);
7143 spin_lock(&cache->lock);
7144 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7145 cache->bytes_super - btrfs_block_group_used(&cache->item);
7146 sinfo->bytes_readonly -= num_bytes;
7147 cache->ro = 0;
7148 spin_unlock(&cache->lock);
7149 spin_unlock(&sinfo->lock);
5d4f98a2
YZ
7150 return 0;
7151}
7152
ba1bf481
JB
7153/*
7154 * checks to see if its even possible to relocate this block group.
7155 *
7156 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
7157 * ok to go ahead and try.
7158 */
7159int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
1a40e23b 7160{
ba1bf481
JB
7161 struct btrfs_block_group_cache *block_group;
7162 struct btrfs_space_info *space_info;
7163 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7164 struct btrfs_device *device;
cdcb725c 7165 u64 min_free;
6719db6a
JB
7166 u64 dev_min = 1;
7167 u64 dev_nr = 0;
cdcb725c 7168 int index;
ba1bf481
JB
7169 int full = 0;
7170 int ret = 0;
1a40e23b 7171
ba1bf481 7172 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1a40e23b 7173
ba1bf481
JB
7174 /* odd, couldn't find the block group, leave it alone */
7175 if (!block_group)
7176 return -1;
1a40e23b 7177
cdcb725c 7178 min_free = btrfs_block_group_used(&block_group->item);
7179
ba1bf481 7180 /* no bytes used, we're good */
cdcb725c 7181 if (!min_free)
1a40e23b
ZY
7182 goto out;
7183
ba1bf481
JB
7184 space_info = block_group->space_info;
7185 spin_lock(&space_info->lock);
17d217fe 7186
ba1bf481 7187 full = space_info->full;
17d217fe 7188
ba1bf481
JB
7189 /*
7190 * if this is the last block group we have in this space, we can't
7ce618db
CM
7191 * relocate it unless we're able to allocate a new chunk below.
7192 *
7193 * Otherwise, we need to make sure we have room in the space to handle
7194 * all of the extents from this block group. If we can, we're good
ba1bf481 7195 */
7ce618db 7196 if ((space_info->total_bytes != block_group->key.offset) &&
cdcb725c 7197 (space_info->bytes_used + space_info->bytes_reserved +
7198 space_info->bytes_pinned + space_info->bytes_readonly +
7199 min_free < space_info->total_bytes)) {
ba1bf481
JB
7200 spin_unlock(&space_info->lock);
7201 goto out;
17d217fe 7202 }
ba1bf481 7203 spin_unlock(&space_info->lock);
ea8c2819 7204
ba1bf481
JB
7205 /*
7206 * ok we don't have enough space, but maybe we have free space on our
7207 * devices to allocate new chunks for relocation, so loop through our
7208 * alloc devices and guess if we have enough space. However, if we
7209 * were marked as full, then we know there aren't enough chunks, and we
7210 * can just return.
7211 */
7212 ret = -1;
7213 if (full)
7214 goto out;
ea8c2819 7215
cdcb725c 7216 /*
7217 * index:
7218 * 0: raid10
7219 * 1: raid1
7220 * 2: dup
7221 * 3: raid0
7222 * 4: single
7223 */
7224 index = get_block_group_index(block_group);
7225 if (index == 0) {
7226 dev_min = 4;
6719db6a
JB
7227 /* Divide by 2 */
7228 min_free >>= 1;
cdcb725c 7229 } else if (index == 1) {
7230 dev_min = 2;
7231 } else if (index == 2) {
6719db6a
JB
7232 /* Multiply by 2 */
7233 min_free <<= 1;
cdcb725c 7234 } else if (index == 3) {
7235 dev_min = fs_devices->rw_devices;
6719db6a 7236 do_div(min_free, dev_min);
cdcb725c 7237 }
7238
ba1bf481
JB
7239 mutex_lock(&root->fs_info->chunk_mutex);
7240 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7bfc837d 7241 u64 dev_offset;
56bec294 7242
ba1bf481
JB
7243 /*
7244 * check to make sure we can actually find a chunk with enough
7245 * space to fit our block group in.
7246 */
7247 if (device->total_bytes > device->bytes_used + min_free) {
125ccb0a 7248 ret = find_free_dev_extent(device, min_free,
7bfc837d 7249 &dev_offset, NULL);
ba1bf481 7250 if (!ret)
cdcb725c 7251 dev_nr++;
7252
7253 if (dev_nr >= dev_min)
73e48b27 7254 break;
cdcb725c 7255
ba1bf481 7256 ret = -1;
725c8463 7257 }
edbd8d4e 7258 }
ba1bf481 7259 mutex_unlock(&root->fs_info->chunk_mutex);
edbd8d4e 7260out:
ba1bf481 7261 btrfs_put_block_group(block_group);
edbd8d4e
CM
7262 return ret;
7263}
7264
b2950863
CH
7265static int find_first_block_group(struct btrfs_root *root,
7266 struct btrfs_path *path, struct btrfs_key *key)
0b86a832 7267{
925baedd 7268 int ret = 0;
0b86a832
CM
7269 struct btrfs_key found_key;
7270 struct extent_buffer *leaf;
7271 int slot;
edbd8d4e 7272
0b86a832
CM
7273 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7274 if (ret < 0)
925baedd
CM
7275 goto out;
7276
d397712b 7277 while (1) {
0b86a832 7278 slot = path->slots[0];
edbd8d4e 7279 leaf = path->nodes[0];
0b86a832
CM
7280 if (slot >= btrfs_header_nritems(leaf)) {
7281 ret = btrfs_next_leaf(root, path);
7282 if (ret == 0)
7283 continue;
7284 if (ret < 0)
925baedd 7285 goto out;
0b86a832 7286 break;
edbd8d4e 7287 }
0b86a832 7288 btrfs_item_key_to_cpu(leaf, &found_key, slot);
edbd8d4e 7289
0b86a832 7290 if (found_key.objectid >= key->objectid &&
925baedd
CM
7291 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7292 ret = 0;
7293 goto out;
7294 }
0b86a832 7295 path->slots[0]++;
edbd8d4e 7296 }
925baedd 7297out:
0b86a832 7298 return ret;
edbd8d4e
CM
7299}
7300
0af3d00b
JB
7301void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7302{
7303 struct btrfs_block_group_cache *block_group;
7304 u64 last = 0;
7305
7306 while (1) {
7307 struct inode *inode;
7308
7309 block_group = btrfs_lookup_first_block_group(info, last);
7310 while (block_group) {
7311 spin_lock(&block_group->lock);
7312 if (block_group->iref)
7313 break;
7314 spin_unlock(&block_group->lock);
7315 block_group = next_block_group(info->tree_root,
7316 block_group);
7317 }
7318 if (!block_group) {
7319 if (last == 0)
7320 break;
7321 last = 0;
7322 continue;
7323 }
7324
7325 inode = block_group->inode;
7326 block_group->iref = 0;
7327 block_group->inode = NULL;
7328 spin_unlock(&block_group->lock);
7329 iput(inode);
7330 last = block_group->key.objectid + block_group->key.offset;
7331 btrfs_put_block_group(block_group);
7332 }
7333}
7334
1a40e23b
ZY
7335int btrfs_free_block_groups(struct btrfs_fs_info *info)
7336{
7337 struct btrfs_block_group_cache *block_group;
4184ea7f 7338 struct btrfs_space_info *space_info;
11833d66 7339 struct btrfs_caching_control *caching_ctl;
1a40e23b
ZY
7340 struct rb_node *n;
7341
11833d66
YZ
7342 down_write(&info->extent_commit_sem);
7343 while (!list_empty(&info->caching_block_groups)) {
7344 caching_ctl = list_entry(info->caching_block_groups.next,
7345 struct btrfs_caching_control, list);
7346 list_del(&caching_ctl->list);
7347 put_caching_control(caching_ctl);
7348 }
7349 up_write(&info->extent_commit_sem);
7350
1a40e23b
ZY
7351 spin_lock(&info->block_group_cache_lock);
7352 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7353 block_group = rb_entry(n, struct btrfs_block_group_cache,
7354 cache_node);
1a40e23b
ZY
7355 rb_erase(&block_group->cache_node,
7356 &info->block_group_cache_tree);
d899e052
YZ
7357 spin_unlock(&info->block_group_cache_lock);
7358
80eb234a 7359 down_write(&block_group->space_info->groups_sem);
1a40e23b 7360 list_del(&block_group->list);
80eb234a 7361 up_write(&block_group->space_info->groups_sem);
d2fb3437 7362
817d52f8 7363 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7364 wait_block_group_cache_done(block_group);
817d52f8 7365
3c14874a
JB
7366 /*
7367 * We haven't cached this block group, which means we could
7368 * possibly have excluded extents on this block group.
7369 */
7370 if (block_group->cached == BTRFS_CACHE_NO)
7371 free_excluded_extents(info->extent_root, block_group);
7372
817d52f8 7373 btrfs_remove_free_space_cache(block_group);
11dfe35a 7374 btrfs_put_block_group(block_group);
d899e052
YZ
7375
7376 spin_lock(&info->block_group_cache_lock);
1a40e23b
ZY
7377 }
7378 spin_unlock(&info->block_group_cache_lock);
4184ea7f
CM
7379
7380 /* now that all the block groups are freed, go through and
7381 * free all the space_info structs. This is only called during
7382 * the final stages of unmount, and so we know nobody is
7383 * using them. We call synchronize_rcu() once before we start,
7384 * just to be on the safe side.
7385 */
7386 synchronize_rcu();
7387
8929ecfa
YZ
7388 release_global_block_rsv(info);
7389
4184ea7f
CM
7390 while(!list_empty(&info->space_info)) {
7391 space_info = list_entry(info->space_info.next,
7392 struct btrfs_space_info,
7393 list);
f0486c68 7394 if (space_info->bytes_pinned > 0 ||
fb25e914
JB
7395 space_info->bytes_reserved > 0 ||
7396 space_info->bytes_may_use > 0) {
f0486c68
YZ
7397 WARN_ON(1);
7398 dump_space_info(space_info, 0, 0);
7399 }
4184ea7f
CM
7400 list_del(&space_info->list);
7401 kfree(space_info);
7402 }
1a40e23b
ZY
7403 return 0;
7404}
7405
b742bb82
YZ
7406static void __link_block_group(struct btrfs_space_info *space_info,
7407 struct btrfs_block_group_cache *cache)
7408{
7409 int index = get_block_group_index(cache);
7410
7411 down_write(&space_info->groups_sem);
7412 list_add_tail(&cache->list, &space_info->block_groups[index]);
7413 up_write(&space_info->groups_sem);
7414}
7415
9078a3e1
CM
7416int btrfs_read_block_groups(struct btrfs_root *root)
7417{
7418 struct btrfs_path *path;
7419 int ret;
9078a3e1 7420 struct btrfs_block_group_cache *cache;
be744175 7421 struct btrfs_fs_info *info = root->fs_info;
6324fbf3 7422 struct btrfs_space_info *space_info;
9078a3e1
CM
7423 struct btrfs_key key;
7424 struct btrfs_key found_key;
5f39d397 7425 struct extent_buffer *leaf;
0af3d00b
JB
7426 int need_clear = 0;
7427 u64 cache_gen;
96b5179d 7428
be744175 7429 root = info->extent_root;
9078a3e1 7430 key.objectid = 0;
0b86a832 7431 key.offset = 0;
9078a3e1 7432 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
9078a3e1
CM
7433 path = btrfs_alloc_path();
7434 if (!path)
7435 return -ENOMEM;
026fd317 7436 path->reada = 1;
9078a3e1 7437
6c41761f 7438 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
73bc1876 7439 if (btrfs_test_opt(root, SPACE_CACHE) &&
6c41761f 7440 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
0af3d00b 7441 need_clear = 1;
88c2ba3b
JB
7442 if (btrfs_test_opt(root, CLEAR_CACHE))
7443 need_clear = 1;
0af3d00b 7444
d397712b 7445 while (1) {
0b86a832 7446 ret = find_first_block_group(root, path, &key);
b742bb82
YZ
7447 if (ret > 0)
7448 break;
0b86a832
CM
7449 if (ret != 0)
7450 goto error;
5f39d397
CM
7451 leaf = path->nodes[0];
7452 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8f18cf13 7453 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9078a3e1 7454 if (!cache) {
0b86a832 7455 ret = -ENOMEM;
f0486c68 7456 goto error;
9078a3e1 7457 }
34d52cb6
LZ
7458 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7459 GFP_NOFS);
7460 if (!cache->free_space_ctl) {
7461 kfree(cache);
7462 ret = -ENOMEM;
7463 goto error;
7464 }
3e1ad54f 7465
d2fb3437 7466 atomic_set(&cache->count, 1);
c286ac48 7467 spin_lock_init(&cache->lock);
817d52f8 7468 cache->fs_info = info;
0f9dd46c 7469 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7470 INIT_LIST_HEAD(&cache->cluster_list);
96303081 7471
0af3d00b
JB
7472 if (need_clear)
7473 cache->disk_cache_state = BTRFS_DC_CLEAR;
7474
5f39d397
CM
7475 read_extent_buffer(leaf, &cache->item,
7476 btrfs_item_ptr_offset(leaf, path->slots[0]),
7477 sizeof(cache->item));
9078a3e1 7478 memcpy(&cache->key, &found_key, sizeof(found_key));
0b86a832 7479
9078a3e1 7480 key.objectid = found_key.objectid + found_key.offset;
b3b4aa74 7481 btrfs_release_path(path);
0b86a832 7482 cache->flags = btrfs_block_group_flags(&cache->item);
817d52f8
JB
7483 cache->sectorsize = root->sectorsize;
7484
34d52cb6
LZ
7485 btrfs_init_free_space_ctl(cache);
7486
3c14874a
JB
7487 /*
7488 * We need to exclude the super stripes now so that the space
7489 * info has super bytes accounted for, otherwise we'll think
7490 * we have more space than we actually do.
7491 */
7492 exclude_super_stripes(root, cache);
7493
817d52f8
JB
7494 /*
7495 * check for two cases, either we are full, and therefore
7496 * don't need to bother with the caching work since we won't
7497 * find any space, or we are empty, and we can just add all
7498 * the space in and be done with it. This saves us _alot_ of
7499 * time, particularly in the full case.
7500 */
7501 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
11833d66 7502 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7503 cache->cached = BTRFS_CACHE_FINISHED;
1b2da372 7504 free_excluded_extents(root, cache);
817d52f8 7505 } else if (btrfs_block_group_used(&cache->item) == 0) {
11833d66 7506 cache->last_byte_to_unpin = (u64)-1;
817d52f8
JB
7507 cache->cached = BTRFS_CACHE_FINISHED;
7508 add_new_free_space(cache, root->fs_info,
7509 found_key.objectid,
7510 found_key.objectid +
7511 found_key.offset);
11833d66 7512 free_excluded_extents(root, cache);
817d52f8 7513 }
96b5179d 7514
6324fbf3
CM
7515 ret = update_space_info(info, cache->flags, found_key.offset,
7516 btrfs_block_group_used(&cache->item),
7517 &space_info);
7518 BUG_ON(ret);
7519 cache->space_info = space_info;
1b2da372 7520 spin_lock(&cache->space_info->lock);
f0486c68 7521 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
7522 spin_unlock(&cache->space_info->lock);
7523
b742bb82 7524 __link_block_group(space_info, cache);
0f9dd46c
JB
7525
7526 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7527 BUG_ON(ret);
75ccf47d
CM
7528
7529 set_avail_alloc_bits(root->fs_info, cache->flags);
2b82032c 7530 if (btrfs_chunk_readonly(root, cache->key.objectid))
199c36ea 7531 set_block_group_ro(cache, 1);
9078a3e1 7532 }
b742bb82
YZ
7533
7534 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7535 if (!(get_alloc_profile(root, space_info->flags) &
7536 (BTRFS_BLOCK_GROUP_RAID10 |
7537 BTRFS_BLOCK_GROUP_RAID1 |
7538 BTRFS_BLOCK_GROUP_DUP)))
7539 continue;
7540 /*
7541 * avoid allocating from un-mirrored block group if there are
7542 * mirrored block groups.
7543 */
7544 list_for_each_entry(cache, &space_info->block_groups[3], list)
199c36ea 7545 set_block_group_ro(cache, 1);
b742bb82 7546 list_for_each_entry(cache, &space_info->block_groups[4], list)
199c36ea 7547 set_block_group_ro(cache, 1);
9078a3e1 7548 }
f0486c68
YZ
7549
7550 init_global_block_rsv(info);
0b86a832
CM
7551 ret = 0;
7552error:
9078a3e1 7553 btrfs_free_path(path);
0b86a832 7554 return ret;
9078a3e1 7555}
6324fbf3
CM
7556
7557int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7558 struct btrfs_root *root, u64 bytes_used,
e17cade2 7559 u64 type, u64 chunk_objectid, u64 chunk_offset,
6324fbf3
CM
7560 u64 size)
7561{
7562 int ret;
6324fbf3
CM
7563 struct btrfs_root *extent_root;
7564 struct btrfs_block_group_cache *cache;
6324fbf3
CM
7565
7566 extent_root = root->fs_info->extent_root;
6324fbf3 7567
12fcfd22 7568 root->fs_info->last_trans_log_full_commit = trans->transid;
e02119d5 7569
8f18cf13 7570 cache = kzalloc(sizeof(*cache), GFP_NOFS);
0f9dd46c
JB
7571 if (!cache)
7572 return -ENOMEM;
34d52cb6
LZ
7573 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7574 GFP_NOFS);
7575 if (!cache->free_space_ctl) {
7576 kfree(cache);
7577 return -ENOMEM;
7578 }
0f9dd46c 7579
e17cade2 7580 cache->key.objectid = chunk_offset;
6324fbf3 7581 cache->key.offset = size;
d2fb3437 7582 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
96303081 7583 cache->sectorsize = root->sectorsize;
0af3d00b 7584 cache->fs_info = root->fs_info;
96303081 7585
d2fb3437 7586 atomic_set(&cache->count, 1);
c286ac48 7587 spin_lock_init(&cache->lock);
0f9dd46c 7588 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7589 INIT_LIST_HEAD(&cache->cluster_list);
0ef3e66b 7590
34d52cb6
LZ
7591 btrfs_init_free_space_ctl(cache);
7592
6324fbf3 7593 btrfs_set_block_group_used(&cache->item, bytes_used);
6324fbf3
CM
7594 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7595 cache->flags = type;
7596 btrfs_set_block_group_flags(&cache->item, type);
7597
11833d66 7598 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7599 cache->cached = BTRFS_CACHE_FINISHED;
11833d66 7600 exclude_super_stripes(root, cache);
96303081 7601
817d52f8
JB
7602 add_new_free_space(cache, root->fs_info, chunk_offset,
7603 chunk_offset + size);
7604
11833d66
YZ
7605 free_excluded_extents(root, cache);
7606
6324fbf3
CM
7607 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7608 &cache->space_info);
7609 BUG_ON(ret);
c7c144db 7610 update_global_block_rsv(root->fs_info);
1b2da372
JB
7611
7612 spin_lock(&cache->space_info->lock);
f0486c68 7613 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
7614 spin_unlock(&cache->space_info->lock);
7615
b742bb82 7616 __link_block_group(cache->space_info, cache);
6324fbf3 7617
0f9dd46c
JB
7618 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7619 BUG_ON(ret);
c286ac48 7620
6324fbf3
CM
7621 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7622 sizeof(cache->item));
7623 BUG_ON(ret);
7624
d18a2c44 7625 set_avail_alloc_bits(extent_root->fs_info, type);
925baedd 7626
6324fbf3
CM
7627 return 0;
7628}
1a40e23b 7629
10ea00f5
ID
7630static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7631{
7632 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
7633
7634 /* chunk -> extended profile */
7635 if (extra_flags == 0)
7636 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7637
7638 if (flags & BTRFS_BLOCK_GROUP_DATA)
7639 fs_info->avail_data_alloc_bits &= ~extra_flags;
7640 if (flags & BTRFS_BLOCK_GROUP_METADATA)
7641 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7642 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7643 fs_info->avail_system_alloc_bits &= ~extra_flags;
7644}
7645
1a40e23b
ZY
7646int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7647 struct btrfs_root *root, u64 group_start)
7648{
7649 struct btrfs_path *path;
7650 struct btrfs_block_group_cache *block_group;
44fb5511 7651 struct btrfs_free_cluster *cluster;
0af3d00b 7652 struct btrfs_root *tree_root = root->fs_info->tree_root;
1a40e23b 7653 struct btrfs_key key;
0af3d00b 7654 struct inode *inode;
1a40e23b 7655 int ret;
10ea00f5 7656 int index;
89a55897 7657 int factor;
1a40e23b 7658
1a40e23b
ZY
7659 root = root->fs_info->extent_root;
7660
7661 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7662 BUG_ON(!block_group);
c146afad 7663 BUG_ON(!block_group->ro);
1a40e23b 7664
9f7c43c9 7665 /*
7666 * Free the reserved super bytes from this block group before
7667 * remove it.
7668 */
7669 free_excluded_extents(root, block_group);
7670
1a40e23b 7671 memcpy(&key, &block_group->key, sizeof(key));
10ea00f5 7672 index = get_block_group_index(block_group);
89a55897
JB
7673 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7674 BTRFS_BLOCK_GROUP_RAID1 |
7675 BTRFS_BLOCK_GROUP_RAID10))
7676 factor = 2;
7677 else
7678 factor = 1;
1a40e23b 7679
44fb5511
CM
7680 /* make sure this block group isn't part of an allocation cluster */
7681 cluster = &root->fs_info->data_alloc_cluster;
7682 spin_lock(&cluster->refill_lock);
7683 btrfs_return_cluster_to_free_space(block_group, cluster);
7684 spin_unlock(&cluster->refill_lock);
7685
7686 /*
7687 * make sure this block group isn't part of a metadata
7688 * allocation cluster
7689 */
7690 cluster = &root->fs_info->meta_alloc_cluster;
7691 spin_lock(&cluster->refill_lock);
7692 btrfs_return_cluster_to_free_space(block_group, cluster);
7693 spin_unlock(&cluster->refill_lock);
7694
1a40e23b 7695 path = btrfs_alloc_path();
d8926bb3
MF
7696 if (!path) {
7697 ret = -ENOMEM;
7698 goto out;
7699 }
1a40e23b 7700
10b2f34d 7701 inode = lookup_free_space_inode(tree_root, block_group, path);
0af3d00b 7702 if (!IS_ERR(inode)) {
b532402e
TI
7703 ret = btrfs_orphan_add(trans, inode);
7704 BUG_ON(ret);
0af3d00b
JB
7705 clear_nlink(inode);
7706 /* One for the block groups ref */
7707 spin_lock(&block_group->lock);
7708 if (block_group->iref) {
7709 block_group->iref = 0;
7710 block_group->inode = NULL;
7711 spin_unlock(&block_group->lock);
7712 iput(inode);
7713 } else {
7714 spin_unlock(&block_group->lock);
7715 }
7716 /* One for our lookup ref */
455757c3 7717 btrfs_add_delayed_iput(inode);
0af3d00b
JB
7718 }
7719
7720 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7721 key.offset = block_group->key.objectid;
7722 key.type = 0;
7723
7724 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7725 if (ret < 0)
7726 goto out;
7727 if (ret > 0)
b3b4aa74 7728 btrfs_release_path(path);
0af3d00b
JB
7729 if (ret == 0) {
7730 ret = btrfs_del_item(trans, tree_root, path);
7731 if (ret)
7732 goto out;
b3b4aa74 7733 btrfs_release_path(path);
0af3d00b
JB
7734 }
7735
3dfdb934 7736 spin_lock(&root->fs_info->block_group_cache_lock);
1a40e23b
ZY
7737 rb_erase(&block_group->cache_node,
7738 &root->fs_info->block_group_cache_tree);
3dfdb934 7739 spin_unlock(&root->fs_info->block_group_cache_lock);
817d52f8 7740
80eb234a 7741 down_write(&block_group->space_info->groups_sem);
44fb5511
CM
7742 /*
7743 * we must use list_del_init so people can check to see if they
7744 * are still on the list after taking the semaphore
7745 */
7746 list_del_init(&block_group->list);
10ea00f5
ID
7747 if (list_empty(&block_group->space_info->block_groups[index]))
7748 clear_avail_alloc_bits(root->fs_info, block_group->flags);
80eb234a 7749 up_write(&block_group->space_info->groups_sem);
1a40e23b 7750
817d52f8 7751 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7752 wait_block_group_cache_done(block_group);
817d52f8
JB
7753
7754 btrfs_remove_free_space_cache(block_group);
7755
c146afad
YZ
7756 spin_lock(&block_group->space_info->lock);
7757 block_group->space_info->total_bytes -= block_group->key.offset;
7758 block_group->space_info->bytes_readonly -= block_group->key.offset;
89a55897 7759 block_group->space_info->disk_total -= block_group->key.offset * factor;
c146afad 7760 spin_unlock(&block_group->space_info->lock);
283bb197 7761
0af3d00b
JB
7762 memcpy(&key, &block_group->key, sizeof(key));
7763
283bb197 7764 btrfs_clear_space_info_full(root->fs_info);
c146afad 7765
fa9c0d79
CM
7766 btrfs_put_block_group(block_group);
7767 btrfs_put_block_group(block_group);
1a40e23b
ZY
7768
7769 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7770 if (ret > 0)
7771 ret = -EIO;
7772 if (ret < 0)
7773 goto out;
7774
7775 ret = btrfs_del_item(trans, root, path);
7776out:
7777 btrfs_free_path(path);
7778 return ret;
7779}
acce952b 7780
c59021f8 7781int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7782{
7783 struct btrfs_space_info *space_info;
1aba86d6 7784 struct btrfs_super_block *disk_super;
7785 u64 features;
7786 u64 flags;
7787 int mixed = 0;
c59021f8 7788 int ret;
7789
6c41761f 7790 disk_super = fs_info->super_copy;
1aba86d6 7791 if (!btrfs_super_root(disk_super))
7792 return 1;
c59021f8 7793
1aba86d6 7794 features = btrfs_super_incompat_flags(disk_super);
7795 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7796 mixed = 1;
c59021f8 7797
1aba86d6 7798 flags = BTRFS_BLOCK_GROUP_SYSTEM;
7799 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
c59021f8 7800 if (ret)
1aba86d6 7801 goto out;
c59021f8 7802
1aba86d6 7803 if (mixed) {
7804 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7805 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7806 } else {
7807 flags = BTRFS_BLOCK_GROUP_METADATA;
7808 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7809 if (ret)
7810 goto out;
7811
7812 flags = BTRFS_BLOCK_GROUP_DATA;
7813 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7814 }
7815out:
c59021f8 7816 return ret;
7817}
7818
acce952b 7819int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7820{
7821 return unpin_extent_range(root, start, end);
7822}
7823
7824int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
5378e607 7825 u64 num_bytes, u64 *actual_bytes)
acce952b 7826{
5378e607 7827 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
acce952b 7828}
f7039b1d
LD
7829
7830int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7831{
7832 struct btrfs_fs_info *fs_info = root->fs_info;
7833 struct btrfs_block_group_cache *cache = NULL;
7834 u64 group_trimmed;
7835 u64 start;
7836 u64 end;
7837 u64 trimmed = 0;
7838 int ret = 0;
7839
7840 cache = btrfs_lookup_block_group(fs_info, range->start);
7841
7842 while (cache) {
7843 if (cache->key.objectid >= (range->start + range->len)) {
7844 btrfs_put_block_group(cache);
7845 break;
7846 }
7847
7848 start = max(range->start, cache->key.objectid);
7849 end = min(range->start + range->len,
7850 cache->key.objectid + cache->key.offset);
7851
7852 if (end - start >= range->minlen) {
7853 if (!block_group_cache_done(cache)) {
7854 ret = cache_block_group(cache, NULL, root, 0);
7855 if (!ret)
7856 wait_block_group_cache_done(cache);
7857 }
7858 ret = btrfs_trim_block_group(cache,
7859 &group_trimmed,
7860 start,
7861 end,
7862 range->minlen);
7863
7864 trimmed += group_trimmed;
7865 if (ret) {
7866 btrfs_put_block_group(cache);
7867 break;
7868 }
7869 }
7870
7871 cache = next_block_group(fs_info->tree_root, cache);
7872 }
7873
7874 range->len = trimmed;
7875 return ret;
7876}