bch2_btree_node_unlock_write_inlined(trans, path, b);
}
-void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
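+/*
+ * Count the read and intent locks held by @trans on node @b at @level,
+ * across all btree paths except @skip:
+ */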
+static struct six_lock_count btree_node_lock_counts(struct btree_trans *trans,
+ struct btree_path *skip,
+ struct btree *b,
+ unsigned level)
{
- struct btree_path *linked;
- unsigned readers = 0;
+ struct btree_path *path;
+ struct six_lock_count ret = { 0, 0 };
+
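+ /*
+ * @b may be a small sentinel value rather than a pointer to a real
+ * node; don't count anything in that case:
+ */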
+ if ((unsigned long) b < 128)
+ return ret;
- trans_for_each_path(trans, linked)
- if (linked->l[b->c.level].b == b &&
- btree_node_read_locked(linked, b->c.level))
- readers++;
+ trans_for_each_path(trans, path)
+ if (path != skip && path->l[level].b == b) {
+ ret.read += btree_node_read_locked(path, level);
+ ret.intent += btree_node_intent_locked(path, level);
+ }
+
+ return ret;
+}
+
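+/*
+ * Adjust the lock's read count directly - on the percpu counter if the
+ * lock is in percpu mode, otherwise on the atomic lock state:
+ */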
+static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+{
+ if (!lock->readers)
+ atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
+ else
+ this_cpu_add(*lock->readers, nr);
+}
+
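+/*
+ * Take the write lock on a node this transaction already has intent locked:
+ * temporarily drop our own read locks on @b so the reader count can reach
+ * zero, then restore them once the write lock is held:
+ */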
+void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+{
+ int readers = btree_node_lock_counts(trans, NULL, b, b->c.level).read;
/*
* Must drop our read locks before calling six_lock_write() -
* six_unlock() won't do wakeups until the reader count
* goes to 0, and it's safe because we have the node intent
* locked:
*/
- if (!b->c.lock.readers)
- atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->c.lock.state.counter);
- else
- this_cpu_sub(*b->c.lock.readers, readers);
-
+ six_lock_readers_add(&b->c.lock, -readers);
six_lock_write(&b->c.lock, NULL, NULL);
-
- if (!b->c.lock.readers)
- atomic64_add(__SIX_VAL(read_lock, readers),
- &b->c.lock.state.counter);
- else
- this_cpu_add(*b->c.lock.readers, readers);
+ six_lock_readers_add(&b->c.lock, readers);
}
bool __bch2_btree_node_relock(struct btree_trans *trans,
goto success;
}
+ trace_btree_node_upgrade_fail(trans->fn, _RET_IP_,
+ path->btree_id,
+ &path->pos,
+ btree_node_locked(path, level),
+ btree_node_lock_counts(trans, NULL, b, level),
+ six_lock_counts(&b->c.lock));
return false;
success:
mark_btree_node_intent_locked(trans, path, level);
static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
{
+ trace_btree_path_free(trans->fn, _RET_IP_, path->btree_id, &path->pos);
__bch2_btree_path_unlock(trans, path);
btree_path_list_remove(trans, path);
trans->paths_allocated &= ~(1ULL << path->idx);
__btree_path_get(path_pos, intent);
path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
} else {
+ trace_btree_path_alloc(trans->fn, _RET_IP_, btree_id, &pos, locks_want);
+
path = btree_path_alloc(trans, path_pos);
path_pos = NULL;
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = btree_id;
__entry->pos_inode = pos->inode;
__entry->node_lock_seq)
);
+TRACE_EVENT(btree_node_upgrade_fail,
+ TP_PROTO(const char *trans_fn,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ bool locked,
+ struct six_lock_count self_lock_count,
+ struct six_lock_count lock_count),
+ TP_ARGS(trans_fn, caller_ip, btree_id, pos,
+ locked, self_lock_count, lock_count),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 24 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, pos_inode )
+ __field(u64, pos_offset )
+ __field(u32, pos_snapshot )
+ __field(u8, locked )
+ __field(u8, self_read_count )
+ __field(u8, read_count )
+ __field(u8, self_intent_count)
+ __field(u8, intent_count )
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->pos_inode = pos->inode;
+ __entry->pos_offset = pos->offset;
+ __entry->pos_snapshot = pos->snapshot;
+ __entry->locked = locked;
+ __entry->self_read_count = self_lock_count.read;
+ __entry->self_intent_count = self_lock_count.intent;
+ __entry->read_count = lock_count.read;
+ __entry->intent_count = lock_count.intent;
+ ),
+
+ TP_printk("%s %pS btree %u pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->locked,
+ __entry->self_read_count,
+ __entry->self_intent_count,
+ __entry->read_count,
+ __entry->intent_count)
+);
+
/* Garbage collection */
DEFINE_EVENT(bch_fs, gc_gens_start,
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
),
TP_ARGS(trans_fn, caller_ip)
);
-DEFINE_EVENT(transaction_event, transaction_restart_ip,
- TP_PROTO(const char *trans_fn,
- unsigned long caller_ip),
- TP_ARGS(trans_fn, caller_ip)
-);
-
DEFINE_EVENT(transaction_event, transaction_restart_injected,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = btree_id;
__entry->pos_inode = pos->inode;
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
+DEFINE_EVENT(transaction_event, transaction_restart_key_cache_upgrade,
+ TP_PROTO(const char *trans_fn,
+ unsigned long caller_ip),
+ TP_ARGS(trans_fn, caller_ip)
+);
+
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->in_traverse_all = in_traverse_all;
__entry->reason = reason;
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
),
TP_printk("%s", __entry->trans_fn)
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->bytes = bytes;
),
),
TP_fast_assign(
- strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = btree_id;
__entry->inode = pos->inode;
__entry->new_u64s)
);
+TRACE_EVENT(btree_path_alloc,
+ TP_PROTO(const char *trans_fn,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ unsigned locks_want),
+ TP_ARGS(trans_fn, caller_ip, btree_id, pos, locks_want),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 24 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, locks_want )
+ __field(u64, pos_inode )
+ __field(u64, pos_offset )
+ __field(u32, pos_snapshot )
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->locks_want = locks_want;
+ __entry->pos_inode = pos->inode;
+ __entry->pos_offset = pos->offset;
+ __entry->pos_snapshot = pos->snapshot;
+ ),
+
+ TP_printk("%s %pS btree %u locks_want %u pos %llu:%llu:%u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->locks_want,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(transaction_restart_iter, btree_path_free,
+ TP_PROTO(const char *trans_fn,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+);
+
#endif /* _TRACE_BCACHEFS_H */
/* This part must be outside protection */