        return NULL;
 }
 
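+/*
+ * Insert @ins into the tree at @root, ordered by comp_refs().  If an
+ * equivalent ref already exists, return it so the caller can merge @ins
+ * into it; otherwise link @ins into the tree and return NULL.
+ */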
+static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
+               struct btrfs_delayed_ref_node *ins)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *node = &ins->ref_node;
+       struct rb_node *parent_node = NULL;
+       struct btrfs_delayed_ref_node *entry;
+
+       while (*p) {
+               int comp;
+
+               parent_node = *p;
+               entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
+                                ref_node);
+               comp = comp_refs(ins, entry, true);
+               if (comp < 0)
+                       p = &(*p)->rb_left;
+               else if (comp > 0)
+                       p = &(*p)->rb_right;
+               else
+                       return entry;
+       }
+
+       rb_link_node(node, parent_node, p);
+       rb_insert_color(node, root);
+       return NULL;
+}
+
 /*
 * find a head entry based on bytenr. This returns the delayed ref
  * head if it was able to find one, or NULL if nothing was in that spot.
                                    struct btrfs_delayed_ref_node *ref)
 {
        assert_spin_locked(&head->lock);
-       list_del(&ref->list);
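+       /* Clear the node after erasing so it reads as unlinked. */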
+       rb_erase(&ref->ref_node, &head->ref_tree);
+       RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
                      u64 seq)
 {
        struct btrfs_delayed_ref_node *next;
+       struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;
 
-       next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
-                               list);
-       while (!done && &next->list != &head->ref_list) {
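+       /*
+        * The tree is sorted by comp_refs(), so any refs that could merge
+        * with @ref directly follow it; stop at the first one that no
+        * longer matches.
+        */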
+       while (!done && node) {
                int mod;
-               struct btrfs_delayed_ref_node *next2;
-
-               next2 = list_next_entry(next, list);
-
-               if (next == ref)
-                       goto next;
 
+               next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+               node = rb_next(node);
                if (seq && next->seq >= seq)
-                       goto next;
-
+                       break;
                if (comp_refs(ref, next, false))
-                       goto next;
+                       break;
 
                if (ref->action == next->action) {
                        mod = next->ref_mod;
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
-next:
-               next = next2;
        }
 
        return done;
                              struct btrfs_delayed_ref_head *head)
 {
        struct btrfs_delayed_ref_node *ref;
+       struct rb_node *node;
        u64 seq = 0;
 
        assert_spin_locked(&head->lock);
 
-       if (list_empty(&head->ref_list))
+       if (RB_EMPTY_ROOT(&head->ref_tree))
                return;
 
        /* We don't have too many refs to merge for data. */
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);
 
-       ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
-                              list);
-       while (&ref->list != &head->ref_list) {
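+       /*
+        * A successful merge may delete @ref itself as well as its
+        * neighbours, invalidating our iteration point, so restart the
+        * scan from rb_first() after every merge.
+        */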
+again:
+       for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+               ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
-                       goto next;
-
-               if (merge_ref(trans, delayed_refs, head, ref, seq)) {
-                       if (list_empty(&head->ref_list))
-                               break;
-                       ref = list_first_entry(&head->ref_list,
-                                              struct btrfs_delayed_ref_node,
-                                              list);
                        continue;
-               }
-next:
-               ref = list_next_entry(ref, list);
+               if (merge_ref(trans, delayed_refs, head, ref, seq))
+                       goto again;
        }
 }
 
  * Return 0 for insert.
  * Return >0 for merge.
  */
-static int
-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
-                          struct btrfs_delayed_ref_root *root,
-                          struct btrfs_delayed_ref_head *href,
-                          struct btrfs_delayed_ref_node *ref)
+static int insert_delayed_ref(struct btrfs_trans_handle *trans,
+                             struct btrfs_delayed_ref_root *root,
+                             struct btrfs_delayed_ref_head *href,
+                             struct btrfs_delayed_ref_node *ref)
 {
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;
 
        spin_lock(&href->lock);
-       /* Check whether we can merge the tail node with ref */
-       if (list_empty(&href->ref_list))
-               goto add_tail;
-       exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
-                          list);
-       /* No need to compare bytenr nor is_head */
-       if (comp_refs(exist, ref, true))
-               goto add_tail;
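+       /* NULL means inserted; otherwise merge with the ref returned. */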
+       exist = tree_insert(&href->ref_tree, ref);
+       if (!exist)
+               goto inserted;
 
        /* Now we are sure we can merge */
        ret = 1;
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
-
-add_tail:
-       list_add_tail(&ref->list, &href->ref_list);
+inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
-       INIT_LIST_HEAD(&head_ref->ref_list);
+       head_ref->ref_tree = RB_ROOT;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
-       INIT_LIST_HEAD(&ref->list);
+       RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
 
        full_ref = btrfs_delayed_node_to_tree_ref(ref);
 
        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
 
-       ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+       ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
 
        /*
         * XXX: memory should be freed at the same level allocated.
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
-       INIT_LIST_HEAD(&ref->list);
+       RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
 
        full_ref = btrfs_delayed_node_to_data_ref(ref);
 
        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
 
-       ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
-
+       ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
 }
 
 {
        struct btrfs_delayed_ref_node *ref;
 
-       if (list_empty(&head->ref_list))
+       if (RB_EMPTY_ROOT(&head->ref_tree))
                return NULL;
 
        /*
                return list_first_entry(&head->ref_add_list,
                                struct btrfs_delayed_ref_node, add_list);
 
-       ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
-                              list);
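+       /* The tree is sorted, so the leftmost ref is the one to run next. */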
+       ref = rb_entry(rb_first(&head->ref_tree),
+                      struct btrfs_delayed_ref_node, ref_node);
        ASSERT(list_empty(&ref->add_list));
        return ref;
 }
        spin_unlock(&head->lock);
        spin_lock(&delayed_refs->lock);
        spin_lock(&head->lock);
-       if (!list_empty(&head->ref_list) || head->extent_op) {
+       if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
                return 1;
 
                actual_count++;
                ref->in_tree = 0;
-               list_del(&ref->list);
+               rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+               RB_CLEAR_NODE(&ref->ref_node);
                if (!list_empty(&ref->add_list))
                        list_del(&ref->add_list);
                /*
        struct btrfs_delayed_data_ref *data_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_transaction *cur_trans;
+       struct rb_node *node;
        int ret = 0;
 
        cur_trans = root->fs_info->running_transaction;
        spin_unlock(&delayed_refs->lock);
 
        spin_lock(&head->lock);
-       list_for_each_entry(ref, &head->ref_list, list) {
+       /*
+        * XXX: We should replace this with a proper search function in the
+        * future.
+        */
+       for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+               ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                /* If it's a shared ref we know a cross reference exists */
                if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
                        ret = 1;
                goto out_delayed_unlock;
 
        spin_lock(&head->lock);
-       if (!list_empty(&head->ref_list))
+       if (!RB_EMPTY_ROOT(&head->ref_tree))
                goto out;
 
        if (head->extent_op) {