struct list_head *p;
struct btrfs_delayed_node *next = NULL;
- delayed_root = node->root->fs_info->delayed_root;
+ delayed_root = &node->root->fs_info->delayed_root;
spin_lock(&delayed_root->lock);
if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
/* not in the list */
if (!delayed_node)
return;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
mutex_lock(&delayed_node->mutex);
if (delayed_node->count)
delayed_node->index_cnt = ins->index + 1;
delayed_node->count++;
- atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+ atomic_inc(&delayed_node->root->fs_info->delayed_root.items);
return 0;
}
/* If it's in a rbtree, then we need to have delayed node locked. */
lockdep_assert_held(&delayed_node->mutex);
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count--;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
finish_one_item(delayed_root);
}
}
ASSERT(delayed_node->root);
delayed_node->count--;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
finish_one_item(delayed_root);
}
}
block_rsv = trans->block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
- delayed_root = fs_info->delayed_root;
+ delayed_root = &fs_info->delayed_root;
curr_node = btrfs_first_delayed_node(delayed_root, &curr_delayed_node_tracker);
while (curr_node && (!count || nr--)) {
struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_node *node;
- node = btrfs_first_delayed_node(fs_info->delayed_root, &delayed_node_tracker);
+ node = btrfs_first_delayed_node(&fs_info->delayed_root, &delayed_node_tracker);
if (WARN_ON(node)) {
btrfs_delayed_node_ref_tracker_free(node,
&delayed_node_tracker);
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
+ struct btrfs_delayed_root *delayed_root = &fs_info->delayed_root;
if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
btrfs_workqueue_normal_congested(fs_info->delayed_workers))
fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count++;
- atomic_inc(&root->fs_info->delayed_root->items);
+ atomic_inc(&root->fs_info->delayed_root.items);
release_node:
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
mutex_lock(&delayed_node->mutex);
if (!test_and_set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
delayed_node->count++;
- atomic_inc(&fs_info->delayed_root->items);
+ atomic_inc(&fs_info->delayed_root.items);
}
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
- curr_node = btrfs_first_delayed_node(fs_info->delayed_root,
+ curr_node = btrfs_first_delayed_node(&fs_info->delayed_root,
&curr_delayed_node_tracker);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);
BTRFS_DELAYED_DELETION_ITEM
};
-struct btrfs_delayed_root {
- spinlock_t lock;
- struct list_head node_list;
- /*
- * Used for delayed nodes which is waiting to be dealt with by the
- * worker. If the delayed node is inserted into the work queue, we
- * drop it from this list.
- */
- struct list_head prepare_list;
- atomic_t items; /* for delayed items */
- atomic_t items_seq; /* for delayed items */
- int nodes; /* for delayed nodes */
- wait_queue_head_t wait;
-};
-
struct btrfs_ref_tracker_dir {
#ifdef CONFIG_BTRFS_DEBUG
struct ref_tracker_dir dir;
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
+#include "delayed-inode.h"
#include "bio.h"
#include "print-tree.h"
#include "locking.h"
btrfs_free_stripe_hash_table(fs_info);
btrfs_free_ref_cache(fs_info);
kfree(fs_info->balance_ctl);
- kfree(fs_info->delayed_root);
free_global_roots(fs_info);
btrfs_put_root(fs_info->tree_root);
btrfs_put_root(fs_info->chunk_root);
if (ret)
return ret;
- fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
- GFP_KERNEL);
- if (!fs_info->delayed_root)
- return -ENOMEM;
- btrfs_init_delayed_root(fs_info->delayed_root);
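+ /* delayed_root is embedded in fs_info now; it only needs initializing. */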
+ btrfs_init_delayed_root(&fs_info->delayed_root);
if (sb_rdonly(sb))
set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
-struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
u64 critical_section_start_time;
};
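+/*
+ * Tracks the delayed nodes and delayed items of the whole filesystem.
+ * Embedded directly in btrfs_fs_info so it is allocated and freed
+ * together with it, instead of being kmalloc'ed separately at mount time.
+ */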
+struct btrfs_delayed_root {
+ spinlock_t lock;
+ struct list_head node_list;
+ /*
+ * Used for delayed nodes that are waiting to be dealt with by the
+ * worker. Once a delayed node is inserted into the work queue, it is
+ * dropped from this list.
+ */
+ struct list_head prepare_list;
+ atomic_t items; /* for delayed items */
+ atomic_t items_seq; /* for delayed items */
+ int nodes; /* for delayed nodes */
+ wait_queue_head_t wait;
+};
+
struct btrfs_fs_info {
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
unsigned long flags;
/* Filesystem state */
unsigned long fs_state;
- struct btrfs_delayed_root *delayed_root;
+ struct btrfs_delayed_root delayed_root;
/* Entries are eb->start >> nodesize_bits */
struct xarray buffer_tree;
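
A minimal sketch of the conversion pattern applied throughout the hunks above (illustration only, not part of the patch): with btrfs_delayed_root embedded in btrfs_fs_info, call sites that previously copied the pointer member now take the address of the embedded member, and direct field access switches from '->' to '.'.

	/* Before: reached through a separately kmalloc'ed pointer member. */
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
	atomic_inc(&fs_info->delayed_root->items);

	/* After: embedded member; take its address where a pointer is needed. */
	struct btrfs_delayed_root *delayed_root = &fs_info->delayed_root;
	atomic_inc(&fs_info->delayed_root.items);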