]> git.ipfire.org Git - people/ms/linux.git/blame - fs/btrfs/ctree.c
Btrfs: add missing spin_lock for insertion into tree mod log
[people/ms/linux.git] / fs / btrfs / ctree.c
CommitLineData
6cbd5570 1/*
d352ac68 2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6cbd5570
CM
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
a6b6e75e 19#include <linux/sched.h>
5a0e3ad6 20#include <linux/slab.h>
bd989ba3 21#include <linux/rbtree.h>
eb60ceac
CM
22#include "ctree.h"
23#include "disk-io.h"
7f5c1516 24#include "transaction.h"
5f39d397 25#include "print-tree.h"
925baedd 26#include "locking.h"
9a8dd150 27
/*
 * forward declarations for helpers defined later in this file and for
 * the time-travel read helpers implemented elsewhere.
 */
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		   *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		   *root, struct btrfs_key *ins_key,
		   struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);
d97e63b6 51
df24a2b9 52struct btrfs_path *btrfs_alloc_path(void)
2c90e5d6 53{
df24a2b9 54 struct btrfs_path *path;
e00f7308 55 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
df24a2b9 56 return path;
2c90e5d6
CM
57}
58
b4ce94de
CM
59/*
60 * set all locked nodes in the path to blocking locks. This should
61 * be done before scheduling
62 */
63noinline void btrfs_set_path_blocking(struct btrfs_path *p)
64{
65 int i;
66 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
bd681513
CM
67 if (!p->nodes[i] || !p->locks[i])
68 continue;
69 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
70 if (p->locks[i] == BTRFS_READ_LOCK)
71 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
72 else if (p->locks[i] == BTRFS_WRITE_LOCK)
73 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
b4ce94de
CM
74 }
75}
76
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path. You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order. If any of the locks in the path are not
	 * currently blocking, it is going to complain. So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		/* remember the blocking variant for the final clear below */
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	/* walk from the lowest level up, the reverse of set_path_blocking */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
122
d352ac68 123/* this also releases the path */
df24a2b9 124void btrfs_free_path(struct btrfs_path *p)
be0e5c09 125{
ff175d57
JJ
126 if (!p)
127 return;
b3b4aa74 128 btrfs_release_path(p);
df24a2b9 129 kmem_cache_free(btrfs_path_cachep, p);
be0e5c09
CM
130}
131
d352ac68
CM
132/*
133 * path release drops references on the extent buffers in the path
134 * and it drops any locks held by this path
135 *
136 * It is safe to call this on paths that no locks or extent buffers held.
137 */
b3b4aa74 138noinline void btrfs_release_path(struct btrfs_path *p)
eb60ceac
CM
139{
140 int i;
a2135011 141
234b63a0 142 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3f157a2f 143 p->slots[i] = 0;
eb60ceac 144 if (!p->nodes[i])
925baedd
CM
145 continue;
146 if (p->locks[i]) {
bd681513 147 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
925baedd
CM
148 p->locks[i] = 0;
149 }
5f39d397 150 free_extent_buffer(p->nodes[i]);
3f157a2f 151 p->nodes[i] = NULL;
eb60ceac
CM
152 }
153}
154
/*
 * safely gets a reference on the root node of a tree. A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear. It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		/* wait for the stale root to be fully released, then retry */
		synchronize_rcu();
	}
	return eb;
}
188
d352ac68
CM
189/* loop around taking references on and locking the root node of the
190 * tree until you end up with a lock on the root. A locked buffer
191 * is returned, with a reference held.
192 */
925baedd
CM
193struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
194{
195 struct extent_buffer *eb;
196
d397712b 197 while (1) {
925baedd
CM
198 eb = btrfs_root_node(root);
199 btrfs_tree_lock(eb);
240f62c8 200 if (eb == root->node)
925baedd 201 break;
925baedd
CM
202 btrfs_tree_unlock(eb);
203 free_extent_buffer(eb);
204 }
205 return eb;
206}
207
bd681513
CM
208/* loop around taking references on and locking the root node of the
209 * tree until you end up with a lock on the root. A locked buffer
210 * is returned, with a reference held.
211 */
212struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
213{
214 struct extent_buffer *eb;
215
216 while (1) {
217 eb = btrfs_root_node(root);
218 btrfs_tree_read_lock(eb);
219 if (eb == root->node)
220 break;
221 btrfs_tree_read_unlock(eb);
222 free_extent_buffer(eb);
223 }
224 return eb;
225}
226
d352ac68
CM
227/* cowonly root (everything not a reference counted cow subvolume), just get
228 * put onto a simple dirty list. transaction.c walks this to make sure they
229 * get properly updated on disk.
230 */
0b86a832
CM
231static void add_root_to_dirty_list(struct btrfs_root *root)
232{
e5846fc6 233 spin_lock(&root->fs_info->trans_lock);
0b86a832
CM
234 if (root->track_dirty && list_empty(&root->dirty_list)) {
235 list_add(&root->dirty_list,
236 &root->fs_info->dirty_cowonly_roots);
237 }
e5846fc6 238 spin_unlock(&root->fs_info->trans_lock);
0b86a832
CM
239}
240
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	/* use the first key of the block as the allocation hint key */
	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* clone the block and stamp the copy's own identity into the header */
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	/* reloc trees take full backrefs; everything else takes normal refs */
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
300
/*
 * operations recorded in the tree modification log. Each element
 * describes one modification of a tree block so readers can later
 * rewind a block to an older state.
 */
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

/* parameters of a MOD_LOG_MOVE_KEYS operation */
struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

/* identifies the old root for a MOD_LOG_ROOT_REPLACE operation */
struct tree_mod_root {
	u64 logical;
	u8 level;
};

/* one entry in the tree modification log, keyed by (index, elem.seq) */
struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
343
/*
 * assign the next modification sequence number to @elem and queue it on
 * the fs-wide tree_mod_seq_list. Callers must hold tree_mod_seq_lock
 * (both callers in this file take it before calling in here).
 */
static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}
350
/*
 * register @elem as a blocker for the tree mod log. While it is on the
 * list, log elements are kept around for readers at its sequence number.
 */
void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	elem->flags = 1;	/* mark the element as in use */
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
359
/*
 * drop the blocker @elem and garbage collect the tree mod log: every
 * element older than the oldest remaining blocker is freed.
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	/* elements that never got a sequence number were never registered */
	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		/* grab the successor first, rb_erase invalidates @node */
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
410
411/*
412 * key order of the log:
413 * index -> sequence
414 *
415 * the index is the shifted logical of the *new* root node for root replace
416 * operations, or the shifted logical of the affected block for all other
417 * operations.
418 */
419static noinline int
420__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
421{
422 struct rb_root *tm_root;
423 struct rb_node **new;
424 struct rb_node *parent = NULL;
425 struct tree_mod_elem *cur;
426 int ret = 0;
427
428 BUG_ON(!tm || !tm->elem.seq);
429
430 write_lock(&fs_info->tree_mod_log_lock);
431 tm_root = &fs_info->tree_mod_log;
432 new = &tm_root->rb_node;
433 while (*new) {
434 cur = container_of(*new, struct tree_mod_elem, node);
435 parent = *new;
436 if (cur->index < tm->index)
437 new = &((*new)->rb_left);
438 else if (cur->index > tm->index)
439 new = &((*new)->rb_right);
440 else if (cur->elem.seq < tm->elem.seq)
441 new = &((*new)->rb_left);
442 else if (cur->elem.seq > tm->elem.seq)
443 new = &((*new)->rb_right);
444 else {
445 kfree(tm);
446 ret = -EEXIST;
447 goto unlock;
448 }
449 }
450
451 rb_link_node(&tm->node, parent, new);
452 rb_insert_color(&tm->node, tm_root);
453unlock:
454 write_unlock(&fs_info->tree_mod_log_lock);
455 return ret;
456}
457
/*
 * allocate a tree mod elem and assign it a sequence number. returns 0 when
 * no blocker is registered (nothing to log), -ENOMEM on allocation failure,
 * or the positive sequence number assigned to *tm_ret.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	int seq;

	/* unlocked fast path: nobody is reading the log, don't record */
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->elem.flags = 0;
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list, because no blocker exists. items
		 * are removed from the list only when the existing blocker is
		 * removed from the list.
		 */
		kfree(tm);
		seq = 0;
	} else {
		__get_tree_mod_seq(fs_info, &tm->elem);
		seq = tm->elem.seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	return seq;
}
491
/*
 * log a single key modification of @eb at @slot. returns 0 when nothing
 * needed logging, a negative errno on failure, or the result of the
 * rb-tree insertion.
 */
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	/* for an ADD there is no old key/blockptr to remember */
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}
515
/* convenience wrapper around tree_mod_log_insert_key_mask using GFP_NOFS */
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}
522
/*
 * log a move of @nr_items key pointers within @eb from @src_slot to
 * @dst_slot. slots that the move overwrites are logged individually
 * first so their contents can be restored on rewind.
 */
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	/* record the keys in [dst_slot, src_slot) that get overwritten */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	return __tree_mod_log_insert(fs_info, tm);
}
550
/*
 * log the replacement of a tree root. the element is keyed by the *new*
 * root's logical and remembers the old root so rewinds can walk back.
 */
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	return __tree_mod_log_insert(fs_info, tm);
}
571
/*
 * search the log for elements affecting the block at @start, ignoring
 * anything with a sequence number below @min_seq. with @smallest set the
 * element with the lowest remaining sequence is returned, otherwise the
 * one with the highest.
 */
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* exact match on min_seq: can't do better */
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
614
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}
626
/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
637
/*
 * log a copy of @nr_items key pointers from @src to @dst as per-slot
 * remove/add pairs. leaf-to-leaf copies carry no node pointers and are
 * not logged.
 */
static inline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	/* unlocked fast path: nobody is reading the log, don't record */
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}
663
664static inline void
665tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
666 int dst_offset, int src_offset, int nr_items)
667{
668 int ret;
669 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
670 nr_items, GFP_NOFS);
671 BUG_ON(ret < 0);
672}
673
674static inline void
675tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
676 struct extent_buffer *eb,
677 struct btrfs_disk_key *disk_key, int slot, int atomic)
678{
679 int ret;
680
681 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
682 MOD_LOG_KEY_REPLACE,
683 atomic ? GFP_ATOMIC : GFP_NOFS);
684 BUG_ON(ret < 0);
685}
686
/*
 * log the removal of every key pointer in @eb before the block is freed.
 * leaves carry no node pointers and are not logged. slots are walked in
 * reverse order.
 */
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	/* unlocked fast path: nobody is reading the log, don't record */
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}
708
/*
 * log the replacement of the tree root by @new_root_node. the old root's
 * slots are logged as freed first, then the root-replace element itself.
 */
static inline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}
719
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* old-format blocks in a ref counted tree may always be shared */
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
745
/*
 * update the extent backrefs when @buf is cow'ed into @cow. sets
 * *last_ref to 1 when the cow drops the last reference on @buf.
 * returns 0 on success or a negative error code.
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/* a shared block with zero refs means corruption */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		/* the block stays shared: convert to full backrefs if the
		 * owner tree (or a reloc tree) still points at it */
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		/* last reference: move the refs over to the cow copy */
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
852
/*
 * does the dirty work in cow of a single block. The parent block (if
 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow. This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	/* caller passed buf itself in *cow_ret: unlock it when done */
	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	/* use the first key of the block as the allocation hint key */
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		/* replacing the root: publish the cow via RCU */
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		/* point the parent slot at the cow copy */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
975
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
		 * we must have key remove operations in the log before the
		 * replace operation.
		 */
		BUG_ON(!tm);

		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		/* follow the chain of root replacements backwards in time */
		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	return found;
}
1019
1020/*
1021 * tm is a pointer to the first operation to rewind within eb. then, all
1022 * previous operations will be rewinded (until we reach something older than
1023 * time_seq).
1024 */
1025static void
1026__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1027 struct tree_mod_elem *first_tm)
1028{
1029 u32 n;
1030 struct rb_node *next;
1031 struct tree_mod_elem *tm = first_tm;
1032 unsigned long o_dst;
1033 unsigned long o_src;
1034 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1035
1036 n = btrfs_header_nritems(eb);
1037 while (tm && tm->elem.seq >= time_seq) {
1038 /*
1039 * all the operations are recorded with the operator used for
1040 * the modification. as we're going backwards, we do the
1041 * opposite of each operation here.
1042 */
1043 switch (tm->op) {
1044 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1045 BUG_ON(tm->slot < n);
1046 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1047 case MOD_LOG_KEY_REMOVE:
1048 btrfs_set_node_key(eb, &tm->key, tm->slot);
1049 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1050 btrfs_set_node_ptr_generation(eb, tm->slot,
1051 tm->generation);
1052 n++;
1053 break;
1054 case MOD_LOG_KEY_REPLACE:
1055 BUG_ON(tm->slot >= n);
1056 btrfs_set_node_key(eb, &tm->key, tm->slot);
1057 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1058 btrfs_set_node_ptr_generation(eb, tm->slot,
1059 tm->generation);
1060 break;
1061 case MOD_LOG_KEY_ADD:
1062 if (tm->slot != n - 1) {
1063 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1064 o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
1065 memmove_extent_buffer(eb, o_dst, o_src, p_size);
1066 }
1067 n--;
1068 break;
1069 case MOD_LOG_MOVE_KEYS:
1070 memmove_extent_buffer(eb, tm->slot, tm->move.dst_slot,
1071 tm->move.nr_items * p_size);
1072 break;
1073 case MOD_LOG_ROOT_REPLACE:
1074 /*
1075 * this operation is special. for roots, this must be
1076 * handled explicitly before rewinding.
1077 * for non-roots, this operation may exist if the node
1078 * was a root: root A -> child B; then A gets empty and
1079 * B is promoted to the new root. in the mod log, we'll
1080 * have a root-replace operation for B, a tree block
1081 * that is no root. we simply ignore that operation.
1082 */
1083 break;
1084 }
1085 next = rb_next(&tm->node);
1086 if (!next)
1087 break;
1088 tm = container_of(next, struct tree_mod_elem, node);
1089 if (tm->index != first_tm->index)
1090 break;
1091 }
1092 btrfs_set_header_nritems(eb, n);
1093}
1094
/*
 * Return an extent buffer that reflects eb's state at 'time_seq' by undoing
 * the logged modifications newer than time_seq.
 *
 * If nothing needs rewinding (time_seq == 0, eb is a leaf, or no log entry
 * exists for the block), eb itself is returned unchanged.  Otherwise the
 * caller's reference on eb is dropped and a rewound private copy is
 * returned instead; the caller must free the returned buffer.
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	/* leaves are not tracked in the tree mod log */
	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/*
		 * the block was freed after time_seq: rebuild it from an
		 * empty dummy buffer and replay the removed keys into it
		 */
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
	} else {
		/* block still exists: rewind on a private clone */
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	/* take our own ref on the copy, release the caller's ref on eb */
	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}
1133
1134static inline struct extent_buffer *
1135get_old_root(struct btrfs_root *root, u64 time_seq)
1136{
1137 struct tree_mod_elem *tm;
1138 struct extent_buffer *eb;
1139 struct tree_mod_root *old_root;
1140 u64 old_generation;
1141
1142 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1143 if (!tm)
1144 return root->node;
1145
1146 old_root = &tm->old_root;
1147 old_generation = tm->generation;
1148
1149 tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
1150 /*
1151 * there was an item in the log when __tree_mod_log_oldest_root
1152 * returned. this one must not go away, because the time_seq passed to
1153 * us must be blocking its removal.
1154 */
1155 BUG_ON(!tm);
1156
1157 if (old_root->logical == root->node->start) {
1158 /* there are logged operations for the current root */
1159 eb = btrfs_clone_extent_buffer(root->node);
1160 } else {
1161 /* there's a root replace operation for the current root */
1162 eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
1163 root->nodesize);
1164 btrfs_set_header_bytenr(eb, eb->start);
1165 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1166 btrfs_set_header_owner(eb, root->root_key.objectid);
1167 }
1168 if (!eb)
1169 return NULL;
1170 btrfs_set_header_level(eb, old_root->level);
1171 btrfs_set_header_generation(eb, old_generation);
1172 __tree_mod_log_rewind(eb, time_seq, tm);
1173
1174 return eb;
1175}
1176
5d4f98a2
YZ
1177static inline int should_cow_block(struct btrfs_trans_handle *trans,
1178 struct btrfs_root *root,
1179 struct extent_buffer *buf)
1180{
f1ebcc74
LB
1181 /* ensure we can see the force_cow */
1182 smp_rmb();
1183
1184 /*
1185 * We do not need to cow a block if
1186 * 1) this block is not created or changed in this transaction;
1187 * 2) this block does not belong to TREE_RELOC tree;
1188 * 3) the root is not forced COW.
1189 *
1190 * What is forced COW:
1191 * when we create snapshot during commiting the transaction,
1192 * after we've finished coping src root, we must COW the shared
1193 * block to ensure the metadata consistency.
1194 */
5d4f98a2
YZ
1195 if (btrfs_header_generation(buf) == trans->transid &&
1196 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1197 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
f1ebcc74
LB
1198 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1199 !root->force_cow)
5d4f98a2
YZ
1200 return 0;
1201 return 1;
1202}
1203
d352ac68
CM
1204/*
1205 * cows a single block, see __btrfs_cow_block for the real work.
1206 * This version of it has extra checks so that a block isn't cow'd more than
1207 * once per transaction, as long as it hasn't been written yet
1208 */
d397712b 1209noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
1210 struct btrfs_root *root, struct extent_buffer *buf,
1211 struct extent_buffer *parent, int parent_slot,
9fa8cfe7 1212 struct extent_buffer **cow_ret)
6702ed49
CM
1213{
1214 u64 search_start;
f510cfec 1215 int ret;
dc17ff8f 1216
6702ed49 1217 if (trans->transaction != root->fs_info->running_transaction) {
d397712b
CM
1218 printk(KERN_CRIT "trans %llu running %llu\n",
1219 (unsigned long long)trans->transid,
1220 (unsigned long long)
6702ed49
CM
1221 root->fs_info->running_transaction->transid);
1222 WARN_ON(1);
1223 }
1224 if (trans->transid != root->fs_info->generation) {
d397712b
CM
1225 printk(KERN_CRIT "trans %llu running %llu\n",
1226 (unsigned long long)trans->transid,
1227 (unsigned long long)root->fs_info->generation);
6702ed49
CM
1228 WARN_ON(1);
1229 }
dc17ff8f 1230
5d4f98a2 1231 if (!should_cow_block(trans, root, buf)) {
6702ed49
CM
1232 *cow_ret = buf;
1233 return 0;
1234 }
c487685d 1235
0b86a832 1236 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
b4ce94de
CM
1237
1238 if (parent)
1239 btrfs_set_lock_blocking(parent);
1240 btrfs_set_lock_blocking(buf);
1241
f510cfec 1242 ret = __btrfs_cow_block(trans, root, buf, parent,
9fa8cfe7 1243 parent_slot, cow_ret, search_start, 0);
1abe9b8a 1244
1245 trace_btrfs_cow_block(root, buf, *cow_ret);
1246
f510cfec 1247 return ret;
6702ed49
CM
1248}
1249
d352ac68
CM
1250/*
1251 * helper function for defrag to decide if two blocks pointed to by a
1252 * node are actually close by
1253 */
6b80053d 1254static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
6702ed49 1255{
6b80053d 1256 if (blocknr < other && other - (blocknr + blocksize) < 32768)
6702ed49 1257 return 1;
6b80053d 1258 if (blocknr > other && blocknr - (other + blocksize) < 32768)
6702ed49
CM
1259 return 1;
1260 return 0;
1261}
1262
081e9573
CM
1263/*
1264 * compare two keys in a memcmp fashion
1265 */
1266static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1267{
1268 struct btrfs_key k1;
1269
1270 btrfs_disk_key_to_cpu(&k1, disk);
1271
20736aba 1272 return btrfs_comp_cpu_keys(&k1, k2);
081e9573
CM
1273}
1274
f3465ca4
JB
1275/*
1276 * same as comp_keys only with two btrfs_key's
1277 */
5d4f98a2 1278int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
f3465ca4
JB
1279{
1280 if (k1->objectid > k2->objectid)
1281 return 1;
1282 if (k1->objectid < k2->objectid)
1283 return -1;
1284 if (k1->type > k2->type)
1285 return 1;
1286 if (k1->type < k2->type)
1287 return -1;
1288 if (k1->offset > k2->offset)
1289 return 1;
1290 if (k1->offset < k2->offset)
1291 return -1;
1292 return 0;
1293}
081e9573 1294
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 *
 * @parent:     the node whose children are to be reallocated
 * @start_slot: first slot in parent to consider
 * @cache_only: only touch children already in the page cache
 * @last_ret:   in/out: search start hint / start of the last cow'd block
 * @progress:   skip children whose first key sorts before this key
 *
 * returns 0 on success, or the first error from __btrfs_cow_block /
 * read_tree_block (-EIO).
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	/* in cache_only mode, only level-1 nodes (leaf parents) are done */
	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	/* sanity: must be called from within the running transaction */
	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	/* a single child cannot be out of order with anything */
	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		/* skip children until we pass the progress cursor */
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		/* a block already close to either neighbor is left alone */
		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				/* not cached: skip it in cache_only mode */
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		/* cow the child near search_start to defragment on disk */
		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
1406
74123bd7
CM
1407/*
1408 * The leaf data grows from end-to-front in the node.
1409 * this returns the address of the start of the last item,
1410 * which is the stop of the leaf data stack
1411 */
123abc88 1412static inline unsigned int leaf_data_end(struct btrfs_root *root,
5f39d397 1413 struct extent_buffer *leaf)
be0e5c09 1414{
5f39d397 1415 u32 nr = btrfs_header_nritems(leaf);
be0e5c09 1416 if (nr == 0)
123abc88 1417 return BTRFS_LEAF_DATA_SIZE(root);
5f39d397 1418 return btrfs_item_offset_nr(leaf, nr - 1);
be0e5c09
CM
1419}
1420

/*
 * search for key in the extent_buffer. The items start at offset p,
 * and they are item_size apart. There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 *
 * returns 0 if the key was found (slot = its index), 1 otherwise
 * (slot = the insertion point).
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;		/* cached kernel mapping into eb */
	unsigned long map_start = 0;	/* eb offset where the mapping begins */
	unsigned long map_len = 0;	/* length of the cached mapping */
	int err;

	/* classic binary search over the on-disk key array */
	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		/*
		 * reuse the previous mapping when it fully covers the key at
		 * 'offset'; otherwise map the needed region of eb
		 */
		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				/*
				 * mapping failed (presumably the key is not
				 * contiguous in one page -- TODO confirm);
				 * fall back to copying it out
				 */
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			/* exact match */
			*slot = mid;
			return 0;
		}
	}
	/* not found: low is where the key would be inserted */
	*slot = low;
	return 1;
}
1488
97571fd0
CM
1489/*
1490 * simple bin_search frontend that does the right thing for
1491 * leaves vs nodes
1492 */
5f39d397
CM
1493static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1494 int level, int *slot)
be0e5c09 1495{
5f39d397
CM
1496 if (level == 0) {
1497 return generic_bin_search(eb,
1498 offsetof(struct btrfs_leaf, items),
0783fcfc 1499 sizeof(struct btrfs_item),
5f39d397 1500 key, btrfs_header_nritems(eb),
7518a238 1501 slot);
be0e5c09 1502 } else {
5f39d397
CM
1503 return generic_bin_search(eb,
1504 offsetof(struct btrfs_node, ptrs),
123abc88 1505 sizeof(struct btrfs_key_ptr),
5f39d397 1506 key, btrfs_header_nritems(eb),
7518a238 1507 slot);
be0e5c09
CM
1508 }
1509 return -1;
1510}
1511
/*
 * public wrapper around the static bin_search helper so that other files
 * can do key lookups in an extent buffer.  Returns 0 when the key is found
 * (*slot = its index) and 1 otherwise (*slot = the insertion point).
 */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
1517
f0486c68
YZ
1518static void root_add_used(struct btrfs_root *root, u32 size)
1519{
1520 spin_lock(&root->accounting_lock);
1521 btrfs_set_root_used(&root->root_item,
1522 btrfs_root_used(&root->root_item) + size);
1523 spin_unlock(&root->accounting_lock);
1524}
1525
1526static void root_sub_used(struct btrfs_root *root, u32 size)
1527{
1528 spin_lock(&root->accounting_lock);
1529 btrfs_set_root_used(&root->root_item,
1530 btrfs_root_used(&root->root_item) - size);
1531 spin_unlock(&root->accounting_lock);
1532}
1533
d352ac68
CM
1534/* given a node and slot number, this reads the blocks it points to. The
1535 * extent buffer is returned with a reference taken (but unlocked).
1536 * NULL is returned on error.
1537 */
e02119d5 1538static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
5f39d397 1539 struct extent_buffer *parent, int slot)
bb803951 1540{
ca7a79ad 1541 int level = btrfs_header_level(parent);
bb803951
CM
1542 if (slot < 0)
1543 return NULL;
5f39d397 1544 if (slot >= btrfs_header_nritems(parent))
bb803951 1545 return NULL;
ca7a79ad
CM
1546
1547 BUG_ON(level == 0);
1548
db94535d 1549 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
ca7a79ad
CM
1550 btrfs_level_size(root, level - 1),
1551 btrfs_node_ptr_generation(parent, slot));
bb803951
CM
1552}
1553
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave an node completely empty later on.
 *
 * path->nodes[level] must be write-locked by the caller; on success the
 * path is updated to keep pointing at the same logical slot even if keys
 * were shifted between siblings.  returns 0 on success, negative errno on
 * failure (the 'enospc' label is the common error-cleanup path).
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	/* remember the block we point at, re-checked at the end */
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	/* more than a quarter full: nothing to rebalance */
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	/* NOTE(review): read with no side effect -- dead statement,
	 * apparently left behind by an earlier change; confirm */
	btrfs_header_nritems(mid);

	/* cow both siblings up front so pushes can modify them */
	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		/* NOTE(review): another no-op read, see above */
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			/* right is now empty: unlink and free it */
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			/* right shrank: refresh its key in the parent */
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete. A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* mid is now empty: unlink and free it */
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			/* our slot migrated into the left sibling */
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
1775
/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 *
 * returns 0 if some pointers were moved into a sibling (and the path was
 * updated to track the original slot), 1 if nothing could be pushed.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* the root has no siblings to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left is (nearly) full, cannot take anything */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* pointers moved left: fix parent key and path */
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				/* our slot is now inside the left sibling */
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* right is (nearly) full, cannot take anything */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* pointers moved right: fix parent key and path */
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				/* our slot migrated into the right sibling */
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
1911
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 *
 * direction comes from path->reada (negative = backwards, positive =
 * forwards); with backwards readahead and a non-zero objectid, stop as
 * soon as the keys leave that objectid.  readahead is capped at 64k of
 * blocks or 32 scanned slots.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	/* only the direct parents of leaves are worth reading ahead */
	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		/* target block already cached: nothing to read ahead */
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		/* step through the node in the readahead direction */
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		/* only read blocks within 64k of the target on disk */
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
925baedd 1981
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 *
 * reads ahead (and then synchronously reads) the two siblings of
 * path->nodes[level] so a following balance finds them cached.  When a
 * sibling had to be read, the whole path is released and -EAGAIN tells
 * the caller to redo the search.
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	/* left sibling, if any, and not already uptodate in cache */
	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here. That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	/* right sibling, same treatment */
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		/* wait for the reads by reading the blocks synchronously */
		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
2051
2052
d352ac68 2053/*
d397712b
CM
2054 * when we walk down the tree, it is usually safe to unlock the higher layers
2055 * in the tree. The exceptions are when our path goes through slot 0, because
2056 * operations on the tree might require changing key pointers higher up in the
2057 * tree.
d352ac68 2058 *
d397712b
CM
2059 * callers might also have set path->keep_locks, which tells this code to keep
2060 * the lock if the path points to the last slot in the block. This is part of
2061 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 2062 *
d397712b
CM
2063 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2064 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 2065 */
e02119d5 2066static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
2067 int lowest_unlock, int min_write_lock_level,
2068 int *write_lock_level)
925baedd
CM
2069{
2070 int i;
2071 int skip_level = level;
051e1b9f 2072 int no_skips = 0;
925baedd
CM
2073 struct extent_buffer *t;
2074
2075 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2076 if (!path->nodes[i])
2077 break;
2078 if (!path->locks[i])
2079 break;
051e1b9f 2080 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
2081 skip_level = i + 1;
2082 continue;
2083 }
051e1b9f 2084 if (!no_skips && path->keep_locks) {
925baedd
CM
2085 u32 nritems;
2086 t = path->nodes[i];
2087 nritems = btrfs_header_nritems(t);
051e1b9f 2088 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
2089 skip_level = i + 1;
2090 continue;
2091 }
2092 }
051e1b9f
CM
2093 if (skip_level < i && i >= lowest_unlock)
2094 no_skips = 1;
2095
925baedd
CM
2096 t = path->nodes[i];
2097 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
bd681513 2098 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 2099 path->locks[i] = 0;
f7c79f30
CM
2100 if (write_lock_level &&
2101 i > min_write_lock_level &&
2102 i <= *write_lock_level) {
2103 *write_lock_level = i - 1;
2104 }
925baedd
CM
2105 }
2106 }
2107}
2108
b4ce94de
CM
2109/*
2110 * This releases any locks held in the path starting at level and
2111 * going all the way up to the root.
2112 *
2113 * btrfs_search_slot will keep the lock held on higher nodes in a few
2114 * corner cases, such as COW of the block at slot zero in the node. This
2115 * ignores those rules, and it should only be called when there are no
2116 * more updates to be done higher up in the tree.
2117 */
2118noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2119{
2120 int i;
2121
5d4f98a2 2122 if (path->keep_locks)
b4ce94de
CM
2123 return;
2124
2125 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2126 if (!path->nodes[i])
12f4dacc 2127 continue;
b4ce94de 2128 if (!path->locks[i])
12f4dacc 2129 continue;
bd681513 2130 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
b4ce94de
CM
2131 path->locks[i] = 0;
2132 }
2133}
2134
c8c42864
CM
2135/*
2136 * helper function for btrfs_search_slot. The goal is to find a block
2137 * in cache without setting the path to blocking. If we find the block
2138 * we return zero and the path is unchanged.
2139 *
2140 * If we can't find the block, we set the path blocking and do some
2141 * reada. -EAGAIN is returned and the search must be repeated.
2142 */
2143static int
2144read_block_for_search(struct btrfs_trans_handle *trans,
2145 struct btrfs_root *root, struct btrfs_path *p,
2146 struct extent_buffer **eb_ret, int level, int slot,
5d9e75c4 2147 struct btrfs_key *key, u64 time_seq)
c8c42864
CM
2148{
2149 u64 blocknr;
2150 u64 gen;
2151 u32 blocksize;
2152 struct extent_buffer *b = *eb_ret;
2153 struct extent_buffer *tmp;
76a05b35 2154 int ret;
c8c42864
CM
2155
2156 blocknr = btrfs_node_blockptr(b, slot);
2157 gen = btrfs_node_ptr_generation(b, slot);
2158 blocksize = btrfs_level_size(root, level - 1);
2159
2160 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a 2161 if (tmp) {
b9fab919
CM
2162 /* first we do an atomic uptodate check */
2163 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2164 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
cb44921a
CM
2165 /*
2166 * we found an up to date block without
2167 * sleeping, return
2168 * right away
2169 */
2170 *eb_ret = tmp;
2171 return 0;
2172 }
2173 /* the pages were up to date, but we failed
2174 * the generation number check. Do a full
2175 * read for the generation number that is correct.
2176 * We must do this without dropping locks so
2177 * we can trust our generation number
2178 */
2179 free_extent_buffer(tmp);
bd681513
CM
2180 btrfs_set_path_blocking(p);
2181
b9fab919 2182 /* now we're allowed to do a blocking uptodate check */
cb44921a 2183 tmp = read_tree_block(root, blocknr, blocksize, gen);
b9fab919 2184 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
cb44921a
CM
2185 *eb_ret = tmp;
2186 return 0;
2187 }
2188 free_extent_buffer(tmp);
b3b4aa74 2189 btrfs_release_path(p);
cb44921a
CM
2190 return -EIO;
2191 }
c8c42864
CM
2192 }
2193
2194 /*
2195 * reduce lock contention at high levels
2196 * of the btree by dropping locks before
76a05b35
CM
2197 * we read. Don't release the lock on the current
2198 * level because we need to walk this node to figure
2199 * out which blocks to read.
c8c42864 2200 */
8c594ea8
CM
2201 btrfs_unlock_up_safe(p, level + 1);
2202 btrfs_set_path_blocking(p);
2203
cb44921a 2204 free_extent_buffer(tmp);
c8c42864
CM
2205 if (p->reada)
2206 reada_for_search(root, p, level, slot, key->objectid);
2207
b3b4aa74 2208 btrfs_release_path(p);
76a05b35
CM
2209
2210 ret = -EAGAIN;
5bdd3536 2211 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
2212 if (tmp) {
2213 /*
2214 * If the read above didn't mark this buffer up to date,
2215 * it will never end up being up to date. Set ret to EIO now
2216 * and give up so that our caller doesn't loop forever
2217 * on our EAGAINs.
2218 */
b9fab919 2219 if (!btrfs_buffer_uptodate(tmp, 0, 0))
76a05b35 2220 ret = -EIO;
c8c42864 2221 free_extent_buffer(tmp);
76a05b35
CM
2222 }
2223 return ret;
c8c42864
CM
2224}
2225
2226/*
2227 * helper function for btrfs_search_slot. This does all of the checks
2228 * for node-level blocks and does any balancing required based on
2229 * the ins_len.
2230 *
2231 * If no extra work was required, zero is returned. If we had to
2232 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2233 * start over
2234 */
2235static int
2236setup_nodes_for_search(struct btrfs_trans_handle *trans,
2237 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
2238 struct extent_buffer *b, int level, int ins_len,
2239 int *write_lock_level)
c8c42864
CM
2240{
2241 int ret;
2242 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2243 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2244 int sret;
2245
bd681513
CM
2246 if (*write_lock_level < level + 1) {
2247 *write_lock_level = level + 1;
2248 btrfs_release_path(p);
2249 goto again;
2250 }
2251
c8c42864
CM
2252 sret = reada_for_balance(root, p, level);
2253 if (sret)
2254 goto again;
2255
2256 btrfs_set_path_blocking(p);
2257 sret = split_node(trans, root, p, level);
bd681513 2258 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2259
2260 BUG_ON(sret > 0);
2261 if (sret) {
2262 ret = sret;
2263 goto done;
2264 }
2265 b = p->nodes[level];
2266 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 2267 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
2268 int sret;
2269
bd681513
CM
2270 if (*write_lock_level < level + 1) {
2271 *write_lock_level = level + 1;
2272 btrfs_release_path(p);
2273 goto again;
2274 }
2275
c8c42864
CM
2276 sret = reada_for_balance(root, p, level);
2277 if (sret)
2278 goto again;
2279
2280 btrfs_set_path_blocking(p);
2281 sret = balance_level(trans, root, p, level);
bd681513 2282 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2283
2284 if (sret) {
2285 ret = sret;
2286 goto done;
2287 }
2288 b = p->nodes[level];
2289 if (!b) {
b3b4aa74 2290 btrfs_release_path(p);
c8c42864
CM
2291 goto again;
2292 }
2293 BUG_ON(btrfs_header_nritems(b) == 1);
2294 }
2295 return 0;
2296
2297again:
2298 ret = -EAGAIN;
2299done:
2300 return ret;
2301}
2302
74123bd7
CM
2303/*
2304 * look for key in the tree. path is filled in with nodes along the way
2305 * if key is found, we return zero and you can find the item in the leaf
2306 * level of the path (level 0)
2307 *
2308 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
2309 * be inserted, and 1 is returned. If there are other errors during the
2310 * search a negative error number is returned.
97571fd0
CM
2311 *
2312 * if ins_len > 0, nodes and leaves will be split as we walk down the
2313 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2314 * possible)
74123bd7 2315 */
e089f05c
CM
2316int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2317 *root, struct btrfs_key *key, struct btrfs_path *p, int
2318 ins_len, int cow)
be0e5c09 2319{
5f39d397 2320 struct extent_buffer *b;
be0e5c09
CM
2321 int slot;
2322 int ret;
33c66f43 2323 int err;
be0e5c09 2324 int level;
925baedd 2325 int lowest_unlock = 1;
bd681513
CM
2326 int root_lock;
2327 /* everything at write_lock_level or lower must be write locked */
2328 int write_lock_level = 0;
9f3a7427 2329 u8 lowest_level = 0;
f7c79f30 2330 int min_write_lock_level;
9f3a7427 2331
6702ed49 2332 lowest_level = p->lowest_level;
323ac95b 2333 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 2334 WARN_ON(p->nodes[0] != NULL);
25179201 2335
bd681513 2336 if (ins_len < 0) {
925baedd 2337 lowest_unlock = 2;
65b51a00 2338
bd681513
CM
2339 /* when we are removing items, we might have to go up to level
2340 * two as we update tree pointers Make sure we keep write
2341 * for those levels as well
2342 */
2343 write_lock_level = 2;
2344 } else if (ins_len > 0) {
2345 /*
2346 * for inserting items, make sure we have a write lock on
2347 * level 1 so we can update keys
2348 */
2349 write_lock_level = 1;
2350 }
2351
2352 if (!cow)
2353 write_lock_level = -1;
2354
2355 if (cow && (p->keep_locks || p->lowest_level))
2356 write_lock_level = BTRFS_MAX_LEVEL;
2357
f7c79f30
CM
2358 min_write_lock_level = write_lock_level;
2359
bb803951 2360again:
bd681513
CM
2361 /*
2362 * we try very hard to do read locks on the root
2363 */
2364 root_lock = BTRFS_READ_LOCK;
2365 level = 0;
5d4f98a2 2366 if (p->search_commit_root) {
bd681513
CM
2367 /*
2368 * the commit roots are read only
2369 * so we always do read locks
2370 */
5d4f98a2
YZ
2371 b = root->commit_root;
2372 extent_buffer_get(b);
bd681513 2373 level = btrfs_header_level(b);
5d4f98a2 2374 if (!p->skip_locking)
bd681513 2375 btrfs_tree_read_lock(b);
5d4f98a2 2376 } else {
bd681513 2377 if (p->skip_locking) {
5d4f98a2 2378 b = btrfs_root_node(root);
bd681513
CM
2379 level = btrfs_header_level(b);
2380 } else {
2381 /* we don't know the level of the root node
2382 * until we actually have it read locked
2383 */
2384 b = btrfs_read_lock_root_node(root);
2385 level = btrfs_header_level(b);
2386 if (level <= write_lock_level) {
2387 /* whoops, must trade for write lock */
2388 btrfs_tree_read_unlock(b);
2389 free_extent_buffer(b);
2390 b = btrfs_lock_root_node(root);
2391 root_lock = BTRFS_WRITE_LOCK;
2392
2393 /* the level might have changed, check again */
2394 level = btrfs_header_level(b);
2395 }
2396 }
5d4f98a2 2397 }
bd681513
CM
2398 p->nodes[level] = b;
2399 if (!p->skip_locking)
2400 p->locks[level] = root_lock;
925baedd 2401
eb60ceac 2402 while (b) {
5f39d397 2403 level = btrfs_header_level(b);
65b51a00
CM
2404
2405 /*
2406 * setup the path here so we can release it under lock
2407 * contention with the cow code
2408 */
02217ed2 2409 if (cow) {
c8c42864
CM
2410 /*
2411 * if we don't really need to cow this block
2412 * then we don't want to set the path blocking,
2413 * so we test it here
2414 */
5d4f98a2 2415 if (!should_cow_block(trans, root, b))
65b51a00 2416 goto cow_done;
5d4f98a2 2417
b4ce94de
CM
2418 btrfs_set_path_blocking(p);
2419
bd681513
CM
2420 /*
2421 * must have write locks on this node and the
2422 * parent
2423 */
2424 if (level + 1 > write_lock_level) {
2425 write_lock_level = level + 1;
2426 btrfs_release_path(p);
2427 goto again;
2428 }
2429
33c66f43
YZ
2430 err = btrfs_cow_block(trans, root, b,
2431 p->nodes[level + 1],
2432 p->slots[level + 1], &b);
2433 if (err) {
33c66f43 2434 ret = err;
65b51a00 2435 goto done;
54aa1f4d 2436 }
02217ed2 2437 }
65b51a00 2438cow_done:
02217ed2 2439 BUG_ON(!cow && ins_len);
65b51a00 2440
eb60ceac 2441 p->nodes[level] = b;
bd681513 2442 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de
CM
2443
2444 /*
2445 * we have a lock on b and as long as we aren't changing
2446 * the tree, there is no way to for the items in b to change.
2447 * It is safe to drop the lock on our parent before we
2448 * go through the expensive btree search on b.
2449 *
2450 * If cow is true, then we might be changing slot zero,
2451 * which may require changing the parent. So, we can't
2452 * drop the lock until after we know which slot we're
2453 * operating on.
2454 */
2455 if (!cow)
2456 btrfs_unlock_up_safe(p, level + 1);
2457
5f39d397 2458 ret = bin_search(b, key, level, &slot);
b4ce94de 2459
5f39d397 2460 if (level != 0) {
33c66f43
YZ
2461 int dec = 0;
2462 if (ret && slot > 0) {
2463 dec = 1;
be0e5c09 2464 slot -= 1;
33c66f43 2465 }
be0e5c09 2466 p->slots[level] = slot;
33c66f43 2467 err = setup_nodes_for_search(trans, root, p, b, level,
bd681513 2468 ins_len, &write_lock_level);
33c66f43 2469 if (err == -EAGAIN)
c8c42864 2470 goto again;
33c66f43
YZ
2471 if (err) {
2472 ret = err;
c8c42864 2473 goto done;
33c66f43 2474 }
c8c42864
CM
2475 b = p->nodes[level];
2476 slot = p->slots[level];
b4ce94de 2477
bd681513
CM
2478 /*
2479 * slot 0 is special, if we change the key
2480 * we have to update the parent pointer
2481 * which means we must have a write lock
2482 * on the parent
2483 */
2484 if (slot == 0 && cow &&
2485 write_lock_level < level + 1) {
2486 write_lock_level = level + 1;
2487 btrfs_release_path(p);
2488 goto again;
2489 }
2490
f7c79f30
CM
2491 unlock_up(p, level, lowest_unlock,
2492 min_write_lock_level, &write_lock_level);
f9efa9c7 2493
925baedd 2494 if (level == lowest_level) {
33c66f43
YZ
2495 if (dec)
2496 p->slots[level]++;
5b21f2ed 2497 goto done;
925baedd 2498 }
ca7a79ad 2499
33c66f43 2500 err = read_block_for_search(trans, root, p,
5d9e75c4 2501 &b, level, slot, key, 0);
33c66f43 2502 if (err == -EAGAIN)
c8c42864 2503 goto again;
33c66f43
YZ
2504 if (err) {
2505 ret = err;
76a05b35 2506 goto done;
33c66f43 2507 }
76a05b35 2508
b4ce94de 2509 if (!p->skip_locking) {
bd681513
CM
2510 level = btrfs_header_level(b);
2511 if (level <= write_lock_level) {
2512 err = btrfs_try_tree_write_lock(b);
2513 if (!err) {
2514 btrfs_set_path_blocking(p);
2515 btrfs_tree_lock(b);
2516 btrfs_clear_path_blocking(p, b,
2517 BTRFS_WRITE_LOCK);
2518 }
2519 p->locks[level] = BTRFS_WRITE_LOCK;
2520 } else {
2521 err = btrfs_try_tree_read_lock(b);
2522 if (!err) {
2523 btrfs_set_path_blocking(p);
2524 btrfs_tree_read_lock(b);
2525 btrfs_clear_path_blocking(p, b,
2526 BTRFS_READ_LOCK);
2527 }
2528 p->locks[level] = BTRFS_READ_LOCK;
b4ce94de 2529 }
bd681513 2530 p->nodes[level] = b;
b4ce94de 2531 }
be0e5c09
CM
2532 } else {
2533 p->slots[level] = slot;
87b29b20
YZ
2534 if (ins_len > 0 &&
2535 btrfs_leaf_free_space(root, b) < ins_len) {
bd681513
CM
2536 if (write_lock_level < 1) {
2537 write_lock_level = 1;
2538 btrfs_release_path(p);
2539 goto again;
2540 }
2541
b4ce94de 2542 btrfs_set_path_blocking(p);
33c66f43
YZ
2543 err = split_leaf(trans, root, key,
2544 p, ins_len, ret == 0);
bd681513 2545 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de 2546
33c66f43
YZ
2547 BUG_ON(err > 0);
2548 if (err) {
2549 ret = err;
65b51a00
CM
2550 goto done;
2551 }
5c680ed6 2552 }
459931ec 2553 if (!p->search_for_split)
f7c79f30
CM
2554 unlock_up(p, level, lowest_unlock,
2555 min_write_lock_level, &write_lock_level);
65b51a00 2556 goto done;
be0e5c09
CM
2557 }
2558 }
65b51a00
CM
2559 ret = 1;
2560done:
b4ce94de
CM
2561 /*
2562 * we don't really know what they plan on doing with the path
2563 * from here on, so for now just mark it as blocking
2564 */
b9473439
CM
2565 if (!p->leave_spinning)
2566 btrfs_set_path_blocking(p);
76a05b35 2567 if (ret < 0)
b3b4aa74 2568 btrfs_release_path(p);
65b51a00 2569 return ret;
be0e5c09
CM
2570}
2571
5d9e75c4
JS
2572/*
2573 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2574 * current state of the tree together with the operations recorded in the tree
2575 * modification log to search for the key in a previous version of this tree, as
2576 * denoted by the time_seq parameter.
2577 *
2578 * Naturally, there is no support for insert, delete or cow operations.
2579 *
2580 * The resulting path and return value will be set up as if we called
2581 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2582 */
2583int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2584 struct btrfs_path *p, u64 time_seq)
2585{
2586 struct extent_buffer *b;
2587 int slot;
2588 int ret;
2589 int err;
2590 int level;
2591 int lowest_unlock = 1;
2592 u8 lowest_level = 0;
2593
2594 lowest_level = p->lowest_level;
2595 WARN_ON(p->nodes[0] != NULL);
2596
2597 if (p->search_commit_root) {
2598 BUG_ON(time_seq);
2599 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2600 }
2601
2602again:
2603 level = 0;
2604 b = get_old_root(root, time_seq);
2605 extent_buffer_get(b);
2606 level = btrfs_header_level(b);
2607 btrfs_tree_read_lock(b);
2608 p->locks[level] = BTRFS_READ_LOCK;
2609
2610 while (b) {
2611 level = btrfs_header_level(b);
2612 p->nodes[level] = b;
2613 btrfs_clear_path_blocking(p, NULL, 0);
2614
2615 /*
2616 * we have a lock on b and as long as we aren't changing
2617 * the tree, there is no way to for the items in b to change.
2618 * It is safe to drop the lock on our parent before we
2619 * go through the expensive btree search on b.
2620 */
2621 btrfs_unlock_up_safe(p, level + 1);
2622
2623 ret = bin_search(b, key, level, &slot);
2624
2625 if (level != 0) {
2626 int dec = 0;
2627 if (ret && slot > 0) {
2628 dec = 1;
2629 slot -= 1;
2630 }
2631 p->slots[level] = slot;
2632 unlock_up(p, level, lowest_unlock, 0, NULL);
2633
2634 if (level == lowest_level) {
2635 if (dec)
2636 p->slots[level]++;
2637 goto done;
2638 }
2639
2640 err = read_block_for_search(NULL, root, p, &b, level,
2641 slot, key, time_seq);
2642 if (err == -EAGAIN)
2643 goto again;
2644 if (err) {
2645 ret = err;
2646 goto done;
2647 }
2648
2649 level = btrfs_header_level(b);
2650 err = btrfs_try_tree_read_lock(b);
2651 if (!err) {
2652 btrfs_set_path_blocking(p);
2653 btrfs_tree_read_lock(b);
2654 btrfs_clear_path_blocking(p, b,
2655 BTRFS_READ_LOCK);
2656 }
2657 p->locks[level] = BTRFS_READ_LOCK;
2658 p->nodes[level] = b;
2659 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2660 if (b != p->nodes[level]) {
2661 btrfs_tree_unlock_rw(p->nodes[level],
2662 p->locks[level]);
2663 p->locks[level] = 0;
2664 p->nodes[level] = b;
2665 }
2666 } else {
2667 p->slots[level] = slot;
2668 unlock_up(p, level, lowest_unlock, 0, NULL);
2669 goto done;
2670 }
2671 }
2672 ret = 1;
2673done:
2674 if (!p->leave_spinning)
2675 btrfs_set_path_blocking(p);
2676 if (ret < 0)
2677 btrfs_release_path(p);
2678
2679 return ret;
2680}
2681
74123bd7
CM
2682/*
2683 * adjust the pointers going up the tree, starting at level
2684 * making sure the right key of each node is points to 'key'.
2685 * This is used after shifting pointers to the left, so it stops
2686 * fixing up pointers when a given leaf/node is not in slot 0 of the
2687 * higher levels
aa5d6bed 2688 *
74123bd7 2689 */
143bede5
JM
2690static void fixup_low_keys(struct btrfs_trans_handle *trans,
2691 struct btrfs_root *root, struct btrfs_path *path,
2692 struct btrfs_disk_key *key, int level)
be0e5c09
CM
2693{
2694 int i;
5f39d397
CM
2695 struct extent_buffer *t;
2696
234b63a0 2697 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 2698 int tslot = path->slots[i];
eb60ceac 2699 if (!path->nodes[i])
be0e5c09 2700 break;
5f39d397 2701 t = path->nodes[i];
f230475e 2702 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
5f39d397 2703 btrfs_set_node_key(t, key, tslot);
d6025579 2704 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
2705 if (tslot != 0)
2706 break;
2707 }
2708}
2709
31840ae1
ZY
2710/*
2711 * update item key.
2712 *
2713 * This function isn't completely safe. It's the caller's responsibility
2714 * that the new key won't break the order
2715 */
143bede5
JM
2716void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2717 struct btrfs_root *root, struct btrfs_path *path,
2718 struct btrfs_key *new_key)
31840ae1
ZY
2719{
2720 struct btrfs_disk_key disk_key;
2721 struct extent_buffer *eb;
2722 int slot;
2723
2724 eb = path->nodes[0];
2725 slot = path->slots[0];
2726 if (slot > 0) {
2727 btrfs_item_key(eb, &disk_key, slot - 1);
143bede5 2728 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
31840ae1
ZY
2729 }
2730 if (slot < btrfs_header_nritems(eb) - 1) {
2731 btrfs_item_key(eb, &disk_key, slot + 1);
143bede5 2732 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
31840ae1
ZY
2733 }
2734
2735 btrfs_cpu_key_to_disk(&disk_key, new_key);
2736 btrfs_set_item_key(eb, &disk_key, slot);
2737 btrfs_mark_buffer_dirty(eb);
2738 if (slot == 0)
2739 fixup_low_keys(trans, root, path, &disk_key, 1);
31840ae1
ZY
2740}
2741
74123bd7
CM
2742/*
2743 * try to push data from one node into the next node left in the
79f95c82 2744 * tree.
aa5d6bed
CM
2745 *
2746 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2747 * error, and > 0 if there was no room in the left hand block.
74123bd7 2748 */
98ed5174
CM
2749static int push_node_left(struct btrfs_trans_handle *trans,
2750 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 2751 struct extent_buffer *src, int empty)
be0e5c09 2752{
be0e5c09 2753 int push_items = 0;
bb803951
CM
2754 int src_nritems;
2755 int dst_nritems;
aa5d6bed 2756 int ret = 0;
be0e5c09 2757
5f39d397
CM
2758 src_nritems = btrfs_header_nritems(src);
2759 dst_nritems = btrfs_header_nritems(dst);
123abc88 2760 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
2761 WARN_ON(btrfs_header_generation(src) != trans->transid);
2762 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 2763
bce4eae9 2764 if (!empty && src_nritems <= 8)
971a1f66
CM
2765 return 1;
2766
d397712b 2767 if (push_items <= 0)
be0e5c09
CM
2768 return 1;
2769
bce4eae9 2770 if (empty) {
971a1f66 2771 push_items = min(src_nritems, push_items);
bce4eae9
CM
2772 if (push_items < src_nritems) {
2773 /* leave at least 8 pointers in the node if
2774 * we aren't going to empty it
2775 */
2776 if (src_nritems - push_items < 8) {
2777 if (push_items <= 8)
2778 return 1;
2779 push_items -= 8;
2780 }
2781 }
2782 } else
2783 push_items = min(src_nritems - 8, push_items);
79f95c82 2784
f230475e
JS
2785 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2786 push_items);
5f39d397
CM
2787 copy_extent_buffer(dst, src,
2788 btrfs_node_key_ptr_offset(dst_nritems),
2789 btrfs_node_key_ptr_offset(0),
d397712b 2790 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 2791
bb803951 2792 if (push_items < src_nritems) {
f230475e
JS
2793 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2794 src_nritems - push_items);
5f39d397
CM
2795 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2796 btrfs_node_key_ptr_offset(push_items),
2797 (src_nritems - push_items) *
2798 sizeof(struct btrfs_key_ptr));
2799 }
2800 btrfs_set_header_nritems(src, src_nritems - push_items);
2801 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2802 btrfs_mark_buffer_dirty(src);
2803 btrfs_mark_buffer_dirty(dst);
31840ae1 2804
79f95c82
CM
2805 return ret;
2806}
2807
2808/*
2809 * try to push data from one node into the next node right in the
2810 * tree.
2811 *
2812 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2813 * error, and > 0 if there was no room in the right hand block.
2814 *
2815 * this will only push up to 1/2 the contents of the left node over
2816 */
5f39d397
CM
2817static int balance_node_right(struct btrfs_trans_handle *trans,
2818 struct btrfs_root *root,
2819 struct extent_buffer *dst,
2820 struct extent_buffer *src)
79f95c82 2821{
79f95c82
CM
2822 int push_items = 0;
2823 int max_push;
2824 int src_nritems;
2825 int dst_nritems;
2826 int ret = 0;
79f95c82 2827
7bb86316
CM
2828 WARN_ON(btrfs_header_generation(src) != trans->transid);
2829 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2830
5f39d397
CM
2831 src_nritems = btrfs_header_nritems(src);
2832 dst_nritems = btrfs_header_nritems(dst);
123abc88 2833 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 2834 if (push_items <= 0)
79f95c82 2835 return 1;
bce4eae9 2836
d397712b 2837 if (src_nritems < 4)
bce4eae9 2838 return 1;
79f95c82
CM
2839
2840 max_push = src_nritems / 2 + 1;
2841 /* don't try to empty the node */
d397712b 2842 if (max_push >= src_nritems)
79f95c82 2843 return 1;
252c38f0 2844
79f95c82
CM
2845 if (max_push < push_items)
2846 push_items = max_push;
2847
f230475e 2848 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
5f39d397
CM
2849 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2850 btrfs_node_key_ptr_offset(0),
2851 (dst_nritems) *
2852 sizeof(struct btrfs_key_ptr));
d6025579 2853
f230475e
JS
2854 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2855 src_nritems - push_items, push_items);
5f39d397
CM
2856 copy_extent_buffer(dst, src,
2857 btrfs_node_key_ptr_offset(0),
2858 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 2859 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 2860
5f39d397
CM
2861 btrfs_set_header_nritems(src, src_nritems - push_items);
2862 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 2863
5f39d397
CM
2864 btrfs_mark_buffer_dirty(src);
2865 btrfs_mark_buffer_dirty(dst);
31840ae1 2866
aa5d6bed 2867 return ret;
be0e5c09
CM
2868}
2869
97571fd0
CM
2870/*
2871 * helper function to insert a new root level in the tree.
2872 * A new node is allocated, and a single item is inserted to
2873 * point to the existing root
aa5d6bed
CM
2874 *
2875 * returns zero on success or < 0 on failure.
97571fd0 2876 */
d397712b 2877static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
2878 struct btrfs_root *root,
2879 struct btrfs_path *path, int level)
5c680ed6 2880{
7bb86316 2881 u64 lower_gen;
5f39d397
CM
2882 struct extent_buffer *lower;
2883 struct extent_buffer *c;
925baedd 2884 struct extent_buffer *old;
5f39d397 2885 struct btrfs_disk_key lower_key;
5c680ed6
CM
2886
2887 BUG_ON(path->nodes[level]);
2888 BUG_ON(path->nodes[level-1] != root->node);
2889
7bb86316
CM
2890 lower = path->nodes[level-1];
2891 if (level == 1)
2892 btrfs_item_key(lower, &lower_key, 0);
2893 else
2894 btrfs_node_key(lower, &lower_key, 0);
2895
31840ae1 2896 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 2897 root->root_key.objectid, &lower_key,
5581a51a 2898 level, root->node->start, 0);
5f39d397
CM
2899 if (IS_ERR(c))
2900 return PTR_ERR(c);
925baedd 2901
f0486c68
YZ
2902 root_add_used(root, root->nodesize);
2903
5d4f98a2 2904 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
2905 btrfs_set_header_nritems(c, 1);
2906 btrfs_set_header_level(c, level);
db94535d 2907 btrfs_set_header_bytenr(c, c->start);
5f39d397 2908 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 2909 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 2910 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
2911
2912 write_extent_buffer(c, root->fs_info->fsid,
2913 (unsigned long)btrfs_header_fsid(c),
2914 BTRFS_FSID_SIZE);
e17cade2
CM
2915
2916 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2917 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2918 BTRFS_UUID_SIZE);
2919
5f39d397 2920 btrfs_set_node_key(c, &lower_key, 0);
db94535d 2921 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 2922 lower_gen = btrfs_header_generation(lower);
31840ae1 2923 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
2924
2925 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 2926
5f39d397 2927 btrfs_mark_buffer_dirty(c);
d5719762 2928
925baedd 2929 old = root->node;
f230475e 2930 tree_mod_log_set_root_pointer(root, c);
240f62c8 2931 rcu_assign_pointer(root->node, c);
925baedd
CM
2932
2933 /* the super has an extra ref to root->node */
2934 free_extent_buffer(old);
2935
0b86a832 2936 add_root_to_dirty_list(root);
5f39d397
CM
2937 extent_buffer_get(c);
2938 path->nodes[level] = c;
bd681513 2939 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
2940 path->slots[level] = 0;
2941 return 0;
2942}
2943
74123bd7
CM
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level, int tree_mod_log)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	/* the caller must have the target node in the path and locked */
	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	/* inserting past the end or into a full node is a caller bug */
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		/*
		 * not appending: shift the key/ptr pairs at and after 'slot'
		 * one place right to open a gap. The move is recorded in the
		 * tree mod log first (only for internal nodes, level != 0,
		 * and only when the caller asked for logging).
		 */
		if (tree_mod_log && level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (tree_mod_log && level) {
		/* log the key addition itself; a logging failure is fatal */
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
	/* write the new key/ptr pair and stamp it with this transaction */
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
2987
97571fd0
CM
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	/* the node must already be COWed in this transaction */
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		/* try to avoid the split by pushing into the siblings */
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	/* the upper half [mid, c_nritems) moves into the new node */
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	/* initialize the header of the new node */
	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	/* record the copy in the tree mod log before doing it */
	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	/* link the new node into the parent, with mod-log recording */
	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1, 1);

	/*
	 * fix the path: keep whichever of the two nodes now holds our slot,
	 * unlock and drop the other one
	 */
	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
3077
74123bd7
CM
3078/*
3079 * how many bytes are required to store the items in a leaf. start
3080 * and nr indicate which items in the leaf to check. This totals up the
3081 * space used both by the item structs and the item data
3082 */
5f39d397 3083static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
3084{
3085 int data_len;
5f39d397 3086 int nritems = btrfs_header_nritems(l);
d4dbff95 3087 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
3088
3089 if (!nr)
3090 return 0;
5f39d397
CM
3091 data_len = btrfs_item_end_nr(l, start);
3092 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 3093 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 3094 WARN_ON(data_len < 0);
be0e5c09
CM
3095 return data_len;
3096}
3097
d4dbff95
CM
3098/*
3099 * The space between the end of the leaf items and
3100 * the start of the leaf data. IOW, how much room
3101 * the leaf has left for both items and data
3102 */
d397712b 3103noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 3104 struct extent_buffer *leaf)
d4dbff95 3105{
5f39d397
CM
3106 int nritems = btrfs_header_nritems(leaf);
3107 int ret;
3108 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3109 if (ret < 0) {
d397712b
CM
3110 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3111 "used %d nritems %d\n",
ae2f5411 3112 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
3113 leaf_space_used(leaf, 0, nritems), nritems);
3114 }
3115 return ret;
d4dbff95
CM
3116}
3117
99d8f83c
CM
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	/* when not emptying, always keep at least one item in the left leaf */
	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	/* reserve room for the pending insert if our slot moves right */
	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	/*
	 * walk items from the end of the left leaf toward 'nr', deciding how
	 * many fit into the right leaf. i is unsigned, so the explicit
	 * 'i == 0' break below avoids wrapping past zero.
	 */
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			/* never push our target slot itself past the split */
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	/* total data bytes to move: from the first pushed item's data end
	 * down to the current end of the left leaf's data area */
	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	/* shift the right leaf's item headers to make room at the front */
	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	/* recompute every data offset in the right leaf from the top down */
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		/* the left leaf is now empty; release it back to the tree */
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	/* the right leaf's first key changed: fix the parent pointer key */
	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
925baedd 3261
44871b1b
CM
3262/*
3263 * push some data in the path leaf to the right, trying to free up at
3264 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3265 *
3266 * returns 1 if the push failed because the other node didn't have enough
3267 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
3268 *
3269 * this will push starting from min_slot to the end of the leaf. It won't
3270 * push any slot lower than min_slot
44871b1b
CM
3271 */
3272static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3273 *root, struct btrfs_path *path,
3274 int min_data_size, int data_size,
3275 int empty, u32 min_slot)
44871b1b
CM
3276{
3277 struct extent_buffer *left = path->nodes[0];
3278 struct extent_buffer *right;
3279 struct extent_buffer *upper;
3280 int slot;
3281 int free_space;
3282 u32 left_nritems;
3283 int ret;
3284
3285 if (!path->nodes[1])
3286 return 1;
3287
3288 slot = path->slots[1];
3289 upper = path->nodes[1];
3290 if (slot >= btrfs_header_nritems(upper) - 1)
3291 return 1;
3292
3293 btrfs_assert_tree_locked(path->nodes[1]);
3294
3295 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
3296 if (right == NULL)
3297 return 1;
3298
44871b1b
CM
3299 btrfs_tree_lock(right);
3300 btrfs_set_lock_blocking(right);
3301
3302 free_space = btrfs_leaf_free_space(root, right);
3303 if (free_space < data_size)
3304 goto out_unlock;
3305
3306 /* cow and double check */
3307 ret = btrfs_cow_block(trans, root, right, upper,
3308 slot + 1, &right);
3309 if (ret)
3310 goto out_unlock;
3311
3312 free_space = btrfs_leaf_free_space(root, right);
3313 if (free_space < data_size)
3314 goto out_unlock;
3315
3316 left_nritems = btrfs_header_nritems(left);
3317 if (left_nritems == 0)
3318 goto out_unlock;
3319
99d8f83c
CM
3320 return __push_leaf_right(trans, root, path, min_data_size, empty,
3321 right, free_space, left_nritems, min_slot);
44871b1b
CM
3322out_unlock:
3323 btrfs_tree_unlock(right);
3324 free_extent_buffer(right);
3325 return 1;
3326}
3327
74123bd7
CM
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/* when not emptying, leave at least one item in the right leaf */
	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	/* walk from the front of the right leaf, counting what fits left */
	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			/* never push our target slot itself past the split */
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		/* reserve room for the pending insert at our slot */
		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	/* bytes of item data being moved: from the top of the leaf data
	 * area down to the last pushed item's data offset */
	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	/* rebase the copied items' data offsets into the left leaf */
	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems) {
		/* should be impossible; leave a loud trace if it happens */
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		/* slide the remaining data and item headers to the front */
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			     (btrfs_header_nritems(right) - push_items) *
			     sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	/* recompute every data offset in the right leaf from the top down */
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		/* the right leaf is now empty; release it back to the tree */
		clean_tree_block(trans, root, right);

	/* the right leaf's first key changed: fix the keys above it */
	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(trans, root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
3483
44871b1b
CM
3484/*
3485 * push some data in the path leaf to the left, trying to free up at
3486 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3487 *
3488 * max_slot can put a limit on how far into the leaf we'll push items. The
3489 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3490 * items
44871b1b
CM
3491 */
3492static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3493 *root, struct btrfs_path *path, int min_data_size,
3494 int data_size, int empty, u32 max_slot)
44871b1b
CM
3495{
3496 struct extent_buffer *right = path->nodes[0];
3497 struct extent_buffer *left;
3498 int slot;
3499 int free_space;
3500 u32 right_nritems;
3501 int ret = 0;
3502
3503 slot = path->slots[1];
3504 if (slot == 0)
3505 return 1;
3506 if (!path->nodes[1])
3507 return 1;
3508
3509 right_nritems = btrfs_header_nritems(right);
3510 if (right_nritems == 0)
3511 return 1;
3512
3513 btrfs_assert_tree_locked(path->nodes[1]);
3514
3515 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
3516 if (left == NULL)
3517 return 1;
3518
44871b1b
CM
3519 btrfs_tree_lock(left);
3520 btrfs_set_lock_blocking(left);
3521
3522 free_space = btrfs_leaf_free_space(root, left);
3523 if (free_space < data_size) {
3524 ret = 1;
3525 goto out;
3526 }
3527
3528 /* cow and double check */
3529 ret = btrfs_cow_block(trans, root, left,
3530 path->nodes[1], slot - 1, &left);
3531 if (ret) {
3532 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
3533 if (ret == -ENOSPC)
3534 ret = 1;
44871b1b
CM
3535 goto out;
3536 }
3537
3538 free_space = btrfs_leaf_free_space(root, left);
3539 if (free_space < data_size) {
3540 ret = 1;
3541 goto out;
3542 }
3543
99d8f83c
CM
3544 return __push_leaf_left(trans, root, path, min_data_size,
3545 empty, left, free_space, right_nritems,
3546 max_slot);
44871b1b
CM
3547out:
3548 btrfs_tree_unlock(left);
3549 free_extent_buffer(left);
3550 return ret;
3551}
3552
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/* items [mid, nritems) move into the new right leaf */
	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	/* copy the item headers */
	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	/* copy the item data, packed against the end of the new leaf */
	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	/* amount every copied data offset shifts up in the new leaf */
	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	/* link the new leaf into the parent after the original one */
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1, 0);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	/* keep whichever leaf now holds our slot, drop the other one */
	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}
3619
99d8f83c
CM
3620/*
3621 * double splits happen when we need to insert a big item in the middle
3622 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3623 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3624 * A B C
3625 *
3626 * We avoid this by trying to push the items on either side of our target
3627 * into the adjacent leaves. If all goes well we can avoid the double split
3628 * completely.
3629 */
3630static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3631 struct btrfs_root *root,
3632 struct btrfs_path *path,
3633 int data_size)
3634{
3635 int ret;
3636 int progress = 0;
3637 int slot;
3638 u32 nritems;
3639
3640 slot = path->slots[0];
3641
3642 /*
3643 * try to push all the items after our slot into the
3644 * right leaf
3645 */
3646 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3647 if (ret < 0)
3648 return ret;
3649
3650 if (ret == 0)
3651 progress++;
3652
3653 nritems = btrfs_header_nritems(path->nodes[0]);
3654 /*
3655 * our goal is to get our slot at the start or end of a leaf. If
3656 * we've done so we're done
3657 */
3658 if (path->slots[0] == 0 || path->slots[0] == nritems)
3659 return 0;
3660
3661 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3662 return 0;
3663
3664 /* try to push all the items before our slot into the next leaf */
3665 slot = path->slots[0];
3666 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3667 if (ret < 0)
3668 return ret;
3669
3670 if (ret == 0)
3671 progress++;
3672
3673 if (progress)
3674 return 0;
3675 return 1;
3676}
3677
74123bd7
CM
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	/* split mode: 0 = just add an empty leaf, 1 = normal split at mid,
	 * 2 = split twice (huge item lands in the middle) */
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	/* an extended item can never grow past a whole empty leaf */
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	/* splitting the root leaf: grow the tree by one level first */
	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	/*
	 * pick the split point. If the half that would keep our slot still
	 * can't fit data_size, move the split point to the slot itself, or
	 * fall back to an empty new leaf (split = 0) or a double split
	 * (split = 2).
	 */
	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	/* the new leaf's first key: the insert key for an empty leaf,
	 * otherwise the key at the split point */
	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	/* initialize the new leaf's header */
	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		/*
		 * no items move: just link the empty new leaf before or
		 * after the current one and point the path at it
		 */
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1, 0);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1], 1, 0);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(trans, root, path,
					       &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		/* a double split may only recurse once */
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	/* try pushing neighbours first so we can avoid the double split */
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
3846
ad48fd75
YZ
/*
 * Make sure the leaf holding the item at path->slots[0] has at least
 * ins_len bytes free, re-searching and splitting it if necessary.
 * Only EXTENT_DATA and EXTENT_CSUM items may be split this way.
 *
 * Returns 0 when the leaf has room, -EAGAIN when the item changed under
 * us and the caller must retry, or a negative error from the search.
 */
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	/*
	 * remember the item's size (and, for file extents, its length) so
	 * we can detect whether it changed while the path was dropped
	 */
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	/*
	 * NOTE(review): ret was just set to -EAGAIN above, so the 'ret > 0'
	 * sub-condition here can never be true; a not-found key (search
	 * returning > 0) is only caught indirectly by the item_size
	 * comparison against whatever slot the search landed on. Looks like
	 * the search result was meant to be tested before the overwrite —
	 * TODO confirm intent.
	 */
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room. return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
3911
/*
 * split_item - split the item at path->slots[0] in two, in place.
 *
 * The caller (via setup_leaf_for_split) has guaranteed the leaf has room
 * for one more struct btrfs_item header.  The first split_offset bytes of
 * the item keep the original key; the remainder becomes a new item at
 * slot + 1 carrying new_key.  Both halves share the original data area,
 * so no item data is copied within the leaf -- only the headers move.
 *
 * Returns 0 on success or -ENOMEM if the bounce buffer cannot be
 * allocated.
 */
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	/* setup_leaf_for_split must have made room for one more header */
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	/* copy the whole item out so we can rewrite both halves below */
	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			    path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the item headers to open a hole at 'slot' */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	/* new item owns the tail of the original data area */
	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	/* original item keeps the head; items grow toward lower offsets */
	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}
3982
3983/*
3984 * This function splits a single item into two items,
3985 * giving 'new_key' to the new item and splitting the
3986 * old one at split_offset (from the start of the item).
3987 *
3988 * The path may be released by this operation. After
3989 * the split, the path is pointing to the old item. The
3990 * new item is going to be in the same node as the old one.
3991 *
3992 * Note, the item being split must be smaller enough to live alone on
3993 * a tree block with room for one extra struct btrfs_item
3994 *
3995 * This allows us to split the item in place, keeping a lock on the
3996 * leaf the entire time.
3997 */
3998int btrfs_split_item(struct btrfs_trans_handle *trans,
3999 struct btrfs_root *root,
4000 struct btrfs_path *path,
4001 struct btrfs_key *new_key,
4002 unsigned long split_offset)
4003{
4004 int ret;
4005 ret = setup_leaf_for_split(trans, root, path,
4006 sizeof(struct btrfs_item));
4007 if (ret)
4008 return ret;
4009
4010 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
4011 return ret;
4012}
4013
ad48fd75
YZ
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	/* make room for a full copy: data plus one item header */
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	/* insert the new (empty) item right after the original */
	path->slots[0]++;
	setup_items_for_insert(trans, root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	/* reload the leaf; setup_items_for_insert may have re-pointed it */
	leaf = path->nodes[0];
	/* copy the original item's data into the new slot */
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
4049
d352ac68
CM
/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 *
 * When truncating from the front, the item's key offset is bumped by
 * the number of bytes removed so the key still points at the first
 * remaining byte.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers: data moves toward the end of
	 * the leaf, so every offset from 'slot' onward grows by size_diff */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		/* keep the first new_size bytes; close the gap at the end */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			/* fi points at where the extent item will land
			 * after the shift below */
			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				/* preserve the fixed header of an inline
				 * extent; only its payload is truncated */
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		/* the item now starts size_diff bytes further into the
		 * logical object; advance the key offset to match */
		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4154
d352ac68
CM
/*
 * make the item pointed to by the path bigger, data_size is the number
 * of bytes to grow it by.  The caller must ensure the leaf has at least
 * data_size bytes of free space; we BUG() otherwise.  The new bytes are
 * appended at the end of the item's data area (its offset shrinks, its
 * size grows) and are left uninitialized for the caller to fill.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers: data from 'slot' onward moves
	 * toward the front of the leaf by data_size bytes */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4223
f3465ca4
JB
4224/*
4225 * Given a key and some data, insert items into the tree.
4226 * This does all the path init required, making room in the tree if needed.
4227 * Returns the number of keys that were inserted.
4228 */
4229int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4230 struct btrfs_root *root,
4231 struct btrfs_path *path,
4232 struct btrfs_key *cpu_key, u32 *data_size,
4233 int nr)
4234{
4235 struct extent_buffer *leaf;
4236 struct btrfs_item *item;
4237 int ret = 0;
4238 int slot;
f3465ca4
JB
4239 int i;
4240 u32 nritems;
4241 u32 total_data = 0;
4242 u32 total_size = 0;
4243 unsigned int data_end;
4244 struct btrfs_disk_key disk_key;
4245 struct btrfs_key found_key;
cfed81a0
CM
4246 struct btrfs_map_token token;
4247
4248 btrfs_init_map_token(&token);
f3465ca4 4249
87b29b20
YZ
4250 for (i = 0; i < nr; i++) {
4251 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4252 BTRFS_LEAF_DATA_SIZE(root)) {
4253 break;
4254 nr = i;
4255 }
f3465ca4 4256 total_data += data_size[i];
87b29b20
YZ
4257 total_size += data_size[i] + sizeof(struct btrfs_item);
4258 }
4259 BUG_ON(nr == 0);
f3465ca4 4260
f3465ca4
JB
4261 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4262 if (ret == 0)
4263 return -EEXIST;
4264 if (ret < 0)
4265 goto out;
4266
f3465ca4
JB
4267 leaf = path->nodes[0];
4268
4269 nritems = btrfs_header_nritems(leaf);
4270 data_end = leaf_data_end(root, leaf);
4271
4272 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4273 for (i = nr; i >= 0; i--) {
4274 total_data -= data_size[i];
4275 total_size -= data_size[i] + sizeof(struct btrfs_item);
4276 if (total_size < btrfs_leaf_free_space(root, leaf))
4277 break;
4278 }
4279 nr = i;
4280 }
4281
4282 slot = path->slots[0];
4283 BUG_ON(slot < 0);
4284
4285 if (slot != nritems) {
4286 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4287
4288 item = btrfs_item_nr(leaf, slot);
4289 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4290
4291 /* figure out how many keys we can insert in here */
4292 total_data = data_size[0];
4293 for (i = 1; i < nr; i++) {
5d4f98a2 4294 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
f3465ca4
JB
4295 break;
4296 total_data += data_size[i];
4297 }
4298 nr = i;
4299
4300 if (old_data < data_end) {
4301 btrfs_print_leaf(root, leaf);
d397712b 4302 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
f3465ca4
JB
4303 slot, old_data, data_end);
4304 BUG_ON(1);
4305 }
4306 /*
4307 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4308 */
4309 /* first correct the data pointers */
f3465ca4
JB
4310 for (i = slot; i < nritems; i++) {
4311 u32 ioff;
4312
4313 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4314 ioff = btrfs_token_item_offset(leaf, item, &token);
4315 btrfs_set_token_item_offset(leaf, item,
4316 ioff - total_data, &token);
f3465ca4 4317 }
f3465ca4
JB
4318 /* shift the items */
4319 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4320 btrfs_item_nr_offset(slot),
4321 (nritems - slot) * sizeof(struct btrfs_item));
4322
4323 /* shift the data */
4324 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4325 data_end - total_data, btrfs_leaf_data(leaf) +
4326 data_end, old_data - data_end);
4327 data_end = old_data;
4328 } else {
4329 /*
4330 * this sucks but it has to be done, if we are inserting at
4331 * the end of the leaf only insert 1 of the items, since we
4332 * have no way of knowing whats on the next leaf and we'd have
4333 * to drop our current locks to figure it out
4334 */
4335 nr = 1;
4336 }
4337
4338 /* setup the item for the new data */
4339 for (i = 0; i < nr; i++) {
4340 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4341 btrfs_set_item_key(leaf, &disk_key, slot + i);
4342 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4343 btrfs_set_token_item_offset(leaf, item,
4344 data_end - data_size[i], &token);
f3465ca4 4345 data_end -= data_size[i];
cfed81a0 4346 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
f3465ca4
JB
4347 }
4348 btrfs_set_header_nritems(leaf, nritems + nr);
4349 btrfs_mark_buffer_dirty(leaf);
4350
4351 ret = 0;
4352 if (slot == 0) {
4353 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
143bede5 4354 fixup_low_keys(trans, root, path, &disk_key, 1);
f3465ca4
JB
4355 }
4356
4357 if (btrfs_leaf_free_space(root, leaf) < 0) {
4358 btrfs_print_leaf(root, leaf);
4359 BUG();
4360 }
4361out:
4362 if (!ret)
4363 ret = nr;
4364 return ret;
4365}
4366
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 *
 * The caller has already positioned path at the insertion slot and
 * verified (via btrfs_search_slot's ins_len) that total_size bytes are
 * available; total_data is the sum of data_size[0..nr-1] and total_size
 * adds one struct btrfs_item header per item.  The new items' data areas
 * are reserved but left uninitialized for the caller to fill.
 */
void setup_items_for_insert(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers: data for items at and
		 * after 'slot' moves toward the leaf front by total_data */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items to open nr header slots */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	/* inserting at slot 0 changes the leaf's lowest key; propagate it */
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4459
4460/*
4461 * Given a key and some data, insert items into the tree.
4462 * This does all the path init required, making room in the tree if needed.
4463 */
4464int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4465 struct btrfs_root *root,
4466 struct btrfs_path *path,
4467 struct btrfs_key *cpu_key, u32 *data_size,
4468 int nr)
4469{
44871b1b
CM
4470 int ret = 0;
4471 int slot;
4472 int i;
4473 u32 total_size = 0;
4474 u32 total_data = 0;
4475
4476 for (i = 0; i < nr; i++)
4477 total_data += data_size[i];
4478
4479 total_size = total_data + (nr * sizeof(struct btrfs_item));
4480 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4481 if (ret == 0)
4482 return -EEXIST;
4483 if (ret < 0)
143bede5 4484 return ret;
44871b1b 4485
44871b1b
CM
4486 slot = path->slots[0];
4487 BUG_ON(slot < 0);
4488
143bede5 4489 setup_items_for_insert(trans, root, path, cpu_key, data_size,
44871b1b 4490 total_data, total_size, nr);
143bede5 4491 return 0;
62e2749e
CM
4492}
4493
4494/*
4495 * Given a key and some data, insert an item into the tree.
4496 * This does all the path init required, making room in the tree if needed.
4497 */
e089f05c
CM
4498int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4499 *root, struct btrfs_key *cpu_key, void *data, u32
4500 data_size)
62e2749e
CM
4501{
4502 int ret = 0;
2c90e5d6 4503 struct btrfs_path *path;
5f39d397
CM
4504 struct extent_buffer *leaf;
4505 unsigned long ptr;
62e2749e 4506
2c90e5d6 4507 path = btrfs_alloc_path();
db5b493a
TI
4508 if (!path)
4509 return -ENOMEM;
2c90e5d6 4510 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 4511 if (!ret) {
5f39d397
CM
4512 leaf = path->nodes[0];
4513 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4514 write_extent_buffer(leaf, data, ptr, data_size);
4515 btrfs_mark_buffer_dirty(leaf);
62e2749e 4516 }
2c90e5d6 4517 btrfs_free_path(path);
aa5d6bed 4518 return ret;
be0e5c09
CM
4519}
4520
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * When tree_mod_log is set and we are above the leaf level, the move and
 * the key removal are recorded in the tree modification log; the move
 * must be logged before the memmove so replay sees the old layout.
 */
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (tree_mod_log && level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		/* close the gap over the removed key pointer */
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}

	if (tree_mod_log && level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		/* removing slot 0 changes this node's lowest key;
		 * propagate it up through the ancestors */
		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
4567
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent. zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	/* a leaf being deleted should have been COWed in this transaction */
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(trans, root, path, 1, path->slots[1], 1);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	/* hold an extra ref so the buffer survives the free call, then
	 * drop it marking the buffer stale */
	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path. If that empties
 * the leaf, remove it from the tree
 *
 * Deletes 'nr' contiguous items starting at 'slot'.  If the leaf ends up
 * less than a third full it is pushed into its neighbors and deleted
 * outright when it drains completely.  Returns 0 on success or the first
 * hard error from the push helpers (-ENOSPC from them is not an error
 * here).
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	/* total data bytes being removed */
	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		/* slide the surviving data over the deleted items' data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		/* surviving items' data moved toward the leaf end */
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		/* close the gap in the item header array */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			/* deleting slot 0 changes the leaf's lowest key */
			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty. Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
4712
7bb86316 4713/*
925baedd 4714 * search the tree again to find a leaf with lesser keys
7bb86316
CM
4715 * returns 0 if it found something or 1 if there are no lesser leaves.
4716 * returns < 0 on io errors.
d352ac68
CM
4717 *
4718 * This may release the path, and so you may lose any locks held at the
4719 * time you call it.
7bb86316
CM
4720 */
4721int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4722{
925baedd
CM
4723 struct btrfs_key key;
4724 struct btrfs_disk_key found_key;
4725 int ret;
7bb86316 4726
925baedd 4727 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 4728
925baedd
CM
4729 if (key.offset > 0)
4730 key.offset--;
4731 else if (key.type > 0)
4732 key.type--;
4733 else if (key.objectid > 0)
4734 key.objectid--;
4735 else
4736 return 1;
7bb86316 4737
b3b4aa74 4738 btrfs_release_path(path);
925baedd
CM
4739 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4740 if (ret < 0)
4741 return ret;
4742 btrfs_item_key(path->nodes[0], &found_key, 0);
4743 ret = comp_keys(&found_key, &key);
4744 if (ret < 0)
4745 return 0;
4746 return 1;
7bb86316
CM
4747}
4748
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id. This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	/* whole tree is older than min_trans: nothing to find */
	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		/* no exact match: back up to the slot covering min_key */
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters. If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			/* stop once candidate keys pass max_key */
			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				/* restart the walk from the new min_key */
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		/* lock the child before releasing ancestors above it */
		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
4893
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path. It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
e7a84565 4906int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
33c66f43 4907 struct btrfs_key *key, int level,
3f157a2f 4908 int cache_only, u64 min_trans)
e7a84565 4909{
e7a84565
CM
4910 int slot;
4911 struct extent_buffer *c;
4912
934d375b 4913 WARN_ON(!path->keep_locks);
d397712b 4914 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
4915 if (!path->nodes[level])
4916 return 1;
4917
4918 slot = path->slots[level] + 1;
4919 c = path->nodes[level];
3f157a2f 4920next:
e7a84565 4921 if (slot >= btrfs_header_nritems(c)) {
33c66f43
YZ
4922 int ret;
4923 int orig_lowest;
4924 struct btrfs_key cur_key;
4925 if (level + 1 >= BTRFS_MAX_LEVEL ||
4926 !path->nodes[level + 1])
e7a84565 4927 return 1;
33c66f43
YZ
4928
4929 if (path->locks[level + 1]) {
4930 level++;
4931 continue;
4932 }
4933
4934 slot = btrfs_header_nritems(c) - 1;
4935 if (level == 0)
4936 btrfs_item_key_to_cpu(c, &cur_key, slot);
4937 else
4938 btrfs_node_key_to_cpu(c, &cur_key, slot);
4939
4940 orig_lowest = path->lowest_level;
b3b4aa74 4941 btrfs_release_path(path);
33c66f43
YZ
4942 path->lowest_level = level;
4943 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4944 0, 0);
4945 path->lowest_level = orig_lowest;
4946 if (ret < 0)
4947 return ret;
4948
4949 c = path->nodes[level];
4950 slot = path->slots[level];
4951 if (ret == 0)
4952 slot++;
4953 goto next;
e7a84565 4954 }
33c66f43 4955
e7a84565
CM
4956 if (level == 0)
4957 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f
CM
4958 else {
4959 u64 blockptr = btrfs_node_blockptr(c, slot);
4960 u64 gen = btrfs_node_ptr_generation(c, slot);
4961
4962 if (cache_only) {
4963 struct extent_buffer *cur;
4964 cur = btrfs_find_tree_block(root, blockptr,
4965 btrfs_level_size(root, level - 1));
b9fab919
CM
4966 if (!cur ||
4967 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
3f157a2f
CM
4968 slot++;
4969 if (cur)
4970 free_extent_buffer(cur);
4971 goto next;
4972 }
4973 free_extent_buffer(cur);
4974 }
4975 if (gen < min_trans) {
4976 slot++;
4977 goto next;
4978 }
e7a84565 4979 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 4980 }
e7a84565
CM
4981 return 0;
4982 }
4983 return 1;
4984}
4985
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
234b63a0 4991int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
d97e63b6
CM
4992{
4993 int slot;
8e73f275 4994 int level;
5f39d397 4995 struct extent_buffer *c;
8e73f275 4996 struct extent_buffer *next;
925baedd
CM
4997 struct btrfs_key key;
4998 u32 nritems;
4999 int ret;
8e73f275 5000 int old_spinning = path->leave_spinning;
bd681513 5001 int next_rw_lock = 0;
925baedd
CM
5002
5003 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 5004 if (nritems == 0)
925baedd 5005 return 1;
925baedd 5006
8e73f275
CM
5007 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5008again:
5009 level = 1;
5010 next = NULL;
bd681513 5011 next_rw_lock = 0;
b3b4aa74 5012 btrfs_release_path(path);
8e73f275 5013
a2135011 5014 path->keep_locks = 1;
31533fb2 5015 path->leave_spinning = 1;
8e73f275 5016
925baedd
CM
5017 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5018 path->keep_locks = 0;
5019
5020 if (ret < 0)
5021 return ret;
5022
a2135011 5023 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
5024 /*
5025 * by releasing the path above we dropped all our locks. A balance
5026 * could have added more items next to the key that used to be
5027 * at the very end of the block. So, check again here and
5028 * advance the path if there are now more items available.
5029 */
a2135011 5030 if (nritems > 0 && path->slots[0] < nritems - 1) {
e457afec
YZ
5031 if (ret == 0)
5032 path->slots[0]++;
8e73f275 5033 ret = 0;
925baedd
CM
5034 goto done;
5035 }
d97e63b6 5036
d397712b 5037 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
5038 if (!path->nodes[level]) {
5039 ret = 1;
5040 goto done;
5041 }
5f39d397 5042
d97e63b6
CM
5043 slot = path->slots[level] + 1;
5044 c = path->nodes[level];
5f39d397 5045 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 5046 level++;
8e73f275
CM
5047 if (level == BTRFS_MAX_LEVEL) {
5048 ret = 1;
5049 goto done;
5050 }
d97e63b6
CM
5051 continue;
5052 }
5f39d397 5053
925baedd 5054 if (next) {
bd681513 5055 btrfs_tree_unlock_rw(next, next_rw_lock);
5f39d397 5056 free_extent_buffer(next);
925baedd 5057 }
5f39d397 5058
8e73f275 5059 next = c;
bd681513 5060 next_rw_lock = path->locks[level];
8e73f275 5061 ret = read_block_for_search(NULL, root, path, &next, level,
5d9e75c4 5062 slot, &key, 0);
8e73f275
CM
5063 if (ret == -EAGAIN)
5064 goto again;
5f39d397 5065
76a05b35 5066 if (ret < 0) {
b3b4aa74 5067 btrfs_release_path(path);
76a05b35
CM
5068 goto done;
5069 }
5070
5cd57b2c 5071 if (!path->skip_locking) {
bd681513 5072 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
5073 if (!ret) {
5074 btrfs_set_path_blocking(path);
bd681513 5075 btrfs_tree_read_lock(next);
31533fb2 5076 btrfs_clear_path_blocking(path, next,
bd681513 5077 BTRFS_READ_LOCK);
8e73f275 5078 }
31533fb2 5079 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 5080 }
d97e63b6
CM
5081 break;
5082 }
5083 path->slots[level] = slot;
d397712b 5084 while (1) {
d97e63b6
CM
5085 level--;
5086 c = path->nodes[level];
925baedd 5087 if (path->locks[level])
bd681513 5088 btrfs_tree_unlock_rw(c, path->locks[level]);
8e73f275 5089
5f39d397 5090 free_extent_buffer(c);
d97e63b6
CM
5091 path->nodes[level] = next;
5092 path->slots[level] = 0;
a74a4b97 5093 if (!path->skip_locking)
bd681513 5094 path->locks[level] = next_rw_lock;
d97e63b6
CM
5095 if (!level)
5096 break;
b4ce94de 5097
8e73f275 5098 ret = read_block_for_search(NULL, root, path, &next, level,
5d9e75c4 5099 0, &key, 0);
8e73f275
CM
5100 if (ret == -EAGAIN)
5101 goto again;
5102
76a05b35 5103 if (ret < 0) {
b3b4aa74 5104 btrfs_release_path(path);
76a05b35
CM
5105 goto done;
5106 }
5107
5cd57b2c 5108 if (!path->skip_locking) {
bd681513 5109 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
5110 if (!ret) {
5111 btrfs_set_path_blocking(path);
bd681513 5112 btrfs_tree_read_lock(next);
31533fb2 5113 btrfs_clear_path_blocking(path, next,
bd681513
CM
5114 BTRFS_READ_LOCK);
5115 }
31533fb2 5116 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 5117 }
d97e63b6 5118 }
8e73f275 5119 ret = 0;
925baedd 5120done:
f7c79f30 5121 unlock_up(path, 0, 1, 0, NULL);
8e73f275
CM
5122 path->leave_spinning = old_spinning;
5123 if (!old_spinning)
5124 btrfs_set_path_blocking(path);
5125
5126 return ret;
d97e63b6 5127}
0b86a832 5128
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
5135int btrfs_previous_item(struct btrfs_root *root,
5136 struct btrfs_path *path, u64 min_objectid,
5137 int type)
5138{
5139 struct btrfs_key found_key;
5140 struct extent_buffer *leaf;
e02119d5 5141 u32 nritems;
0b86a832
CM
5142 int ret;
5143
d397712b 5144 while (1) {
0b86a832 5145 if (path->slots[0] == 0) {
b4ce94de 5146 btrfs_set_path_blocking(path);
0b86a832
CM
5147 ret = btrfs_prev_leaf(root, path);
5148 if (ret != 0)
5149 return ret;
5150 } else {
5151 path->slots[0]--;
5152 }
5153 leaf = path->nodes[0];
e02119d5
CM
5154 nritems = btrfs_header_nritems(leaf);
5155 if (nritems == 0)
5156 return 1;
5157 if (path->slots[0] == nritems)
5158 path->slots[0]--;
5159
0b86a832 5160 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
5161 if (found_key.objectid < min_objectid)
5162 break;
0a4eefbb
YZ
5163 if (found_key.type == type)
5164 return 0;
e02119d5
CM
5165 if (found_key.objectid == min_objectid &&
5166 found_key.type < type)
5167 break;
0b86a832
CM
5168 }
5169 return 1;
5170}