high_word(f, r),
b->nr_key_bits);
- EBUG_ON(ret != bkey_cmp(bkey_unpack_pos(b, l),
+ EBUG_ON(ret != bpos_cmp(bkey_unpack_pos(b, l),
bkey_unpack_pos(b, r)));
return ret;
}
const struct bkey_packed *l,
const struct bpos *r)
{
- return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r);
+ return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
}
__pure __flatten
r = (void*) &unpacked;
}
- return bkey_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
+ return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
}
__pure __flatten
const struct bkey *l_unpacked;
return unlikely(l_unpacked = packed_to_bkey_c(l))
- ? bkey_cmp(l_unpacked->p, *r)
+ ? bpos_cmp(l_unpacked->p, *r)
: __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}
return bkey_cmp_left_packed(b, l, &r);
}
-#if 1
+static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
+{
+ return cmp_int(l.inode, r.inode) ?:
+ cmp_int(l.offset, r.offset) ?:
+ cmp_int(l.snapshot, r.snapshot);
+}
+
static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
- if (l.inode != r.inode)
- return l.inode < r.inode ? -1 : 1;
- if (l.offset != r.offset)
- return l.offset < r.offset ? -1 : 1;
- if (l.snapshot != r.snapshot)
- return l.snapshot < r.snapshot ? -1 : 1;
- return 0;
+ return cmp_int(l.inode, r.inode) ?:
+ cmp_int(l.offset, r.offset);
}
-#else
-int bkey_cmp(struct bpos l, struct bpos r);
-#endif
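For reference, the split above leaves two comparators with different semantics: bpos_cmp() orders positions by all three fields, while bkey_cmp() now ignores the snapshot field. cmp_int() is a three-way compare and the GNU `?:` extension falls through to the next field only when the previous one compares equal, so each chain is a lexicographic comparison. A minimal stand-alone sketch, with simplified stand-ins for struct bpos and cmp_int() rather than the real bcachefs definitions:

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct bpos { uint64_t inode, offset; uint32_t snapshot; };

/* Three-way compare: -1, 0 or 1. */
#define cmp_int(a, b) (((a) > (b)) - ((a) < (b)))

/* Orders by inode, then offset, then snapshot. */
static int bpos_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset) ?:
		cmp_int(l.snapshot, r.snapshot);
}

/* Ignores the snapshot field. */
static int bkey_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,  r.inode) ?:
		cmp_int(l.offset, r.offset);
}

int main(void)
{
	struct bpos a = { .inode = 1, .offset = 8, .snapshot = 1 };
	struct bpos b = { .inode = 1, .offset = 8, .snapshot = 2 };

	/* Same inode:offset, different snapshot: */
	printf("bpos_cmp: %d\n", bpos_cmp(a, b));	/* -1 */
	printf("bkey_cmp: %d\n", bkey_cmp(a, b));	/*  0 */
	return 0;
}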
static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
- return bkey_cmp(l, r) < 0 ? l : r;
+ return bpos_cmp(l, r) < 0 ? l : r;
}
static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
- return bkey_cmp(l, r) > 0 ? l : r;
+ return bpos_cmp(l, r) > 0 ? l : r;
}
#define sbb(a, b, borrow) \
static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
{
- if (bkey_cmp(l, r) > 0)
+ if (bpos_cmp(l, r) > 0)
swap(l, r);
return bpos_sub(r, l);
const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
{
- if (bkey_cmp(k.k->p, b->data->min_key) < 0)
+ if (bpos_cmp(k.k->p, b->data->min_key) < 0)
return "key before start of btree node";
- if (bkey_cmp(k.k->p, b->data->max_key) > 0)
+ if (bpos_cmp(k.k->p, b->data->max_key) > 0)
return "key past end of btree node";
return NULL;
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
{
- if (!bkey_cmp(pos, POS_MIN))
+ if (!bpos_cmp(pos, POS_MIN))
pr_buf(out, "POS_MIN");
- else if (!bkey_cmp(pos, POS_MAX))
+ else if (!bpos_cmp(pos, POS_MAX))
pr_buf(out, "POS_MAX");
else {
if (pos.inode == U64_MAX)
!ops->key_merge ||
l.k->type != r.k->type ||
bversion_cmp(l.k->version, r.k->version) ||
- bkey_cmp(l.k->p, bkey_start_pos(r.k)))
+ bpos_cmp(l.k->p, bkey_start_pos(r.k)))
return BCH_MERGE_NOMERGE;
ret = ops->key_merge(c, l, r);
n = bkey_unpack_key(b, _n);
- if (bkey_cmp(bkey_start_pos(&n), k.k->p) < 0) {
+ if (bpos_cmp(n.p, k.k->p) < 0) {
printk(KERN_ERR "Key skipped backwards\n");
continue;
}
if (!bkey_deleted(k.k) &&
- !bkey_cmp(n.p, k.k->p))
+ !bpos_cmp(n.p, k.k->p))
printk(KERN_ERR "Duplicate keys\n");
}
}
goto start;
while (1) {
if (rw_aux_to_bkey(b, t, j) == k) {
- BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
+ BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
bkey_unpack_pos(b, k)));
start:
if (++j == t->size)
while (l + 1 != r) {
unsigned m = (l + r) >> 1;
- if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+ if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
l = m;
else
r = m;
* start and end - handle that here:
*/
- if (bkey_cmp(*search, t->max_key) > 0)
+ if (bpos_cmp(*search, t->max_key) > 0)
return btree_bkey_last(b, t);
return bset_search_tree(b, t, search, lossy_packed_search);
struct bkey_packed *k[MAX_BSETS];
unsigned i;
- EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
+ EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
bset_aux_tree_verify(b);
memset(iter, 0, sizeof(*iter));
EBUG_ON(r_packed && !bkey_packed(r_packed));
if (unlikely(!bkey_packed(l)))
- return bkey_cmp(packed_to_bkey_c(l)->p, *r);
+ return bpos_cmp(packed_to_bkey_c(l)->p, *r);
if (likely(r_packed))
return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);
return bch2_bkey_prev_filter(b, t, k, 1);
}
-enum bch_extent_overlap {
- BCH_EXTENT_OVERLAP_ALL = 0,
- BCH_EXTENT_OVERLAP_BACK = 1,
- BCH_EXTENT_OVERLAP_FRONT = 2,
- BCH_EXTENT_OVERLAP_MIDDLE = 3,
-};
-
-/* Returns how k overlaps with m */
-static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
- const struct bkey *m)
-{
- int cmp1 = bkey_cmp(k->p, m->p) < 0;
- int cmp2 = bkey_cmp(bkey_start_pos(k),
- bkey_start_pos(m)) > 0;
-
- return (cmp1 << 1) + cmp2;
-}
-
/* Btree key iteration */
void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
EBUG_ON(b->c.btree_id != iter->btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- EBUG_ON(bkey_cmp(b->data->max_key, k->k.p));
+ EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bkey_cmp(b->data->min_key,
+ bpos_cmp(b->data->min_key,
bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
return b;
EBUG_ON(b->c.btree_id != btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- EBUG_ON(bkey_cmp(b->data->max_key, k->k.p));
+ EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bkey_cmp(b->data->min_key,
+ bpos_cmp(b->data->min_key,
bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
out:
bch2_btree_cache_cannibalize_unlock(c);
if (sib != btree_prev_sib)
swap(n1, n2);
- if (bkey_cmp(bkey_successor(n1->key.k.p),
+ if (bpos_cmp(bkey_successor(n1->key.k.p),
n2->data->min_key)) {
char buf1[200], buf2[200];
bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
}
- if (fsck_err_on(bkey_cmp(expected_start, bp->v.min_key), c,
+ if (fsck_err_on(bpos_cmp(expected_start, bp->v.min_key), c,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" cur %s",
}
if (fsck_err_on(is_last &&
- bkey_cmp(cur.k->k.p, node_end), c,
+ bpos_cmp(cur.k->k.p, node_end), c,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bkey_cmp(k.k->p, b->data->min_key) < 0);
- BUG_ON(bkey_cmp(k.k->p, b->data->max_key) > 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
&k, &max_stale, true);
return 0;
six_lock_read(&b->c.lock, NULL, NULL);
- if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c,
+ if (fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
"btree root with incorrect min_key: %s",
(bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
BUG();
}
- if (fsck_err_on(bkey_cmp(b->data->max_key, POS_MAX), c,
+ if (fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c,
"btree root with incorrect max_key: %s",
(bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
BUG();
unsigned j;
for (j = 0; j < nr_new_nodes; j++)
- if (!bkey_cmp(old_nodes[i]->key.k.p,
+ if (!bpos_cmp(old_nodes[i]->key.k.p,
new_nodes[j]->key.k.p))
goto next;
static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
- if (l.phase != r.phase)
- return l.phase < r.phase ? -1 : 1;
- if (bkey_cmp(l.pos, r.pos))
- return bkey_cmp(l.pos, r.pos);
- if (l.level != r.level)
- return l.level < r.level ? -1 : 1;
- return 0;
+ return cmp_int(l.phase, r.phase) ?:
+ bpos_cmp(l.pos, r.pos) ?:
+ cmp_int(l.level, r.level);
}
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
struct bkey l = bkey_unpack_key(b, p);
struct bkey r = bkey_unpack_key(b, k);
- BUG_ON(bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
+ BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
}
#endif
}
b->data->max_key = b->key.k.p;
}
- btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
+ btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
"incorrect min_key: got %s should be %s",
(bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
(bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
}
- btree_err_on(bkey_cmp(bn->max_key, b->key.k.p),
+ btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
BTREE_ERR_MUST_RETRY, c, ca, b, i,
"incorrect max key %s",
(bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));
{
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
- bkey_cmp(bn->min_key, POS_MIN) &&
+ bpos_cmp(bn->min_key, POS_MIN) &&
write)
bn->min_key = bkey_predecessor(bn->min_key);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
- bkey_cmp(bn->min_key, POS_MIN) &&
+ bpos_cmp(bn->min_key, POS_MIN) &&
!write)
bn->min_key = bkey_successor(bn->min_key);
}
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
struct btree *b)
{
- return bkey_cmp(iter->real_pos, b->data->min_key) < 0;
+ return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
}
static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
struct btree *b)
{
- return bkey_cmp(b->key.k.p, iter->real_pos) < 0;
+ return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
}
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
- bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+ bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
btree_iter_type(linked))) <= 0) {
deadlock_iter = linked;
reason = 7;
if (!b)
return NULL;
- BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
+ BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
iter->pos = iter->real_pos = b->key.k.p;
if (!b)
return NULL;
- if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
+ if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
/*
* Haven't gotten to the end of the parent node: go back down to
* the next child node
static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
- int cmp = bkey_cmp(new_pos, iter->real_pos);
+ int cmp = bpos_cmp(new_pos, iter->real_pos);
unsigned l = iter->level;
if (!cmp)
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
- bool ret = bkey_cmp(pos, POS_MAX) != 0;
+ bool ret = bpos_cmp(pos, POS_MAX) != 0;
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_successor(pos);
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = bkey_cmp(pos, POS_MIN) != 0;
+ bool ret = bpos_cmp(pos, POS_MIN) != 0;
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_predecessor(pos);
static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
struct bpos next_pos = iter->l[0].b->key.k.p;
- bool ret = bkey_cmp(next_pos, POS_MAX) != 0;
+ bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
/*
* Typically, we don't want to modify iter->pos here, since that
static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
struct bpos next_pos = iter->l[0].b->data->min_key;
- bool ret = bkey_cmp(next_pos, POS_MIN) != 0;
+ bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
if (ret)
btree_iter_set_search_pos(iter, bkey_predecessor(next_pos));
const struct bkey_cached_key *key = arg->key;
return cmp_int(ck->key.btree_id, key->btree_id) ?:
- bkey_cmp(ck->key.pos, key->pos);
+ bpos_cmp(ck->key.pos, key->pos);
}
static const struct rhashtable_params bch2_btree_key_cache_params = {
const struct btree_iter *iter = p;
return ck->key.btree_id == iter->btree_id &&
- !bkey_cmp(ck->key.pos, iter->pos) ? 0 : -1;
+ !bpos_cmp(ck->key.pos, iter->pos) ? 0 : -1;
}
__flatten
if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
bkey_cached_check_fn, iter, _THIS_IP_)) {
if (ck->key.btree_id != iter->btree_id ||
- bkey_cmp(ck->key.pos, iter->pos)) {
+ bpos_cmp(ck->key.pos, iter->pos)) {
goto retry;
}
}
if (ck->key.btree_id != iter->btree_id ||
- bkey_cmp(ck->key.pos, iter->pos)) {
+ bpos_cmp(ck->key.pos, iter->pos)) {
six_unlock_type(&ck->c.lock, lock_want);
goto retry;
}
break;
bp = bkey_s_c_to_btree_ptr_v2(k);
- if (bkey_cmp(next_node, bp.v->min_key)) {
+ if (bpos_cmp(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b);
panic("expected next min_key %s got %s\n",
(bch2_bpos_to_text(&PBUF(buf1), next_node), buf1),
bch2_btree_node_iter_advance(&iter, b);
if (bch2_btree_node_iter_end(&iter)) {
- if (bkey_cmp(k.k->p, b->key.k.p)) {
+ if (bpos_cmp(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b);
panic("expected end %s got %s\n",
(bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1),
{
return cmp_int(l->btree_id, r->btree_id) ?:
-cmp_int(l->level, r->level) ?:
- bkey_cmp(l->k->k.p, r->k->k.p);
+ bpos_cmp(l->k->k.p, r->k->k.p);
}
static inline bool same_leaf_as_prev(struct btree_trans *trans,
EBUG_ON(btree_node_just_written(b));
EBUG_ON(bset_written(b, btree_bset_last(b)));
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
- EBUG_ON(bkey_cmp(insert->k.p, b->data->min_key) < 0);
- EBUG_ON(bkey_cmp(insert->k.p, b->data->max_key) > 0);
+ EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
+ EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
EBUG_ON(insert->k.u64s >
bch_btree_keys_u64s_remaining(iter->trans->c, b));
EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
BUG_ON(bch2_debug_check_bkeys &&
bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type));
- BUG_ON(bkey_cmp(i->k->k.p, i->iter->real_pos));
+ BUG_ON(bpos_cmp(i->k->k.p, i->iter->real_pos));
BUG_ON(i->level != i->iter->level);
BUG_ON(i->btree_id != i->iter->btree_id);
}
if (err)
return err;
- if (!i->size || !bkey_cmp(POS_MAX, i->from))
+ if (!i->size || !bpos_cmp(POS_MAX, i->from))
return i->ret;
bch2_trans_init(&trans, i->c, 0, 0);
* can't easily correctly restart a btree node traversal across
* all nodes, meh
*/
- i->from = bkey_cmp(POS_MAX, b->key.k.p)
+ i->from = bpos_cmp(POS_MAX, b->key.k.p)
? bkey_successor(b->key.k.p)
: b->key.k.p;
/* Generic extent code: */
+enum bch_extent_overlap {
+ BCH_EXTENT_OVERLAP_ALL = 0,
+ BCH_EXTENT_OVERLAP_BACK = 1,
+ BCH_EXTENT_OVERLAP_FRONT = 2,
+ BCH_EXTENT_OVERLAP_MIDDLE = 3,
+};
+
+/* Returns how k overlaps with m */
+static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
+ const struct bkey *m)
+{
+ int cmp1 = bkey_cmp(k->p, m->p) < 0;
+ int cmp2 = bkey_cmp(bkey_start_pos(k),
+ bkey_start_pos(m)) > 0;
+
+ return (cmp1 << 1) + cmp2;
+}
+
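The encoding above packs the two comparisons into a two-bit value that lines up with the enum: bit 1 is set when k ends before m does, bit 0 when k starts after m does, so 0 means k covers all of m, 1 and 2 mean it overlaps only m's back or front, and 3 means it sits strictly inside m. A hedged stand-alone sketch of the same mapping, modelling extents as plain half-open integer ranges rather than bkeys:

/* Illustrative sketch only: [start, end) integer ranges stand in for
 * bkey_start_pos(k)..k->p, to show how (cmp1 << 1) + cmp2 lands on
 * the enum values above. */
#include <stdio.h>

struct ext { unsigned start, end; };	/* end is exclusive, like k->p */

static int overlap(struct ext k, struct ext m)
{
	int cmp1 = k.end < m.end;	/* k stops before m does  */
	int cmp2 = k.start > m.start;	/* k begins after m does  */

	return (cmp1 << 1) + cmp2;
}

int main(void)
{
	struct ext m = { 10, 20 };

	printf("%d\n", overlap((struct ext){  5, 25 }, m));	/* 0: covers all of m    */
	printf("%d\n", overlap((struct ext){ 15, 25 }, m));	/* 1: overlaps m's back  */
	printf("%d\n", overlap((struct ext){  5, 15 }, m));	/* 2: overlaps m's front */
	printf("%d\n", overlap((struct ext){ 12, 18 }, m));	/* 3: sits in m's middle */
	return 0;
}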
int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);
{
return (cmp_int(l_btree_id, r->btree_id) ?:
cmp_int(l_level, r->level) ?:
- bkey_cmp(l_pos, r->k->k.p));
+ bpos_cmp(l_pos, r->k->k.p));
}
static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
return (cmp_int(l->btree_id, r->btree_id) ?:
cmp_int(l->level, r->level) ?:
- bkey_cmp(l->k->k.p, r->k->k.p));
+ bpos_cmp(l->k->k.p, r->k->k.p));
}
static size_t journal_key_search(struct journal_keys *journal_keys,
if (iter->idx > idx ||
(iter->idx == idx &&
biter->last &&
- bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
+ bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
iter->idx++;
}
bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));
if (btree_k.k && journal_k.k) {
- int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);
+ int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);
if (!cmp)
bch2_journal_iter_advance_btree(iter);
ret = iter->last == journal ? journal_k : btree_k;
if (iter->b &&
- bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
+ bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
iter->journal.idx = iter->journal.keys->nr;
iter->last = none;
return bkey_s_c_null;
return cmp_int(l->btree_id, r->btree_id) ?:
cmp_int(l->level, r->level) ?:
- bkey_cmp(l->k->k.p, r->k->k.p) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p) ?:
cmp_int(l->journal_seq, r->journal_seq) ?:
cmp_int(l->journal_offset, r->journal_offset);
}
while (src + 1 < keys.d + keys.nr &&
src[0].btree_id == src[1].btree_id &&
src[0].level == src[1].level &&
- !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
+ !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
src++;
*dst++ = *src++;
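This is the usual sort-then-dedup pass: the comparator above ends with journal_seq and journal_offset, so entries for the same btree position sort adjacent with the newest last, and skipping forward over equal positions keeps only the most recent version of each key. A small stand-alone sketch of the same pattern over hypothetical plain-integer entries, not the real journal_keys structures:

/* Illustrative sketch only: dedup a sorted array, keeping the last
 * (newest) entry of each run of equal keys. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int key; int seq; };	/* hypothetical stand-in */

static int entry_cmp(const void *_l, const void *_r)
{
	const struct entry *l = _l, *r = _r;

	return (l->key > r->key) - (l->key < r->key) ?:
	       (l->seq > r->seq) - (l->seq < r->seq);
}

int main(void)
{
	struct entry e[] = { {2, 1}, {1, 3}, {2, 5}, {1, 7} };
	size_t nr = sizeof(e) / sizeof(e[0]);
	struct entry *dst, *src;

	qsort(e, nr, sizeof(e[0]), entry_cmp);

	for (src = dst = e; src < e + nr; *dst++ = *src++)
		while (src + 1 < e + nr && src[0].key == src[1].key)
			src++;	/* skip older duplicates */

	nr = dst - e;
	for (size_t i = 0; i < nr; i++)
		printf("key %d seq %d\n", e[i].key, e[i].seq);
	/* prints: key 1 seq 7, key 2 seq 5 */
	return 0;
}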
return cmp_int(r->level, l->level) ?:
cmp_int(l->journal_seq, r->journal_seq) ?:
cmp_int(l->btree_id, r->btree_id) ?:
- bkey_cmp(l->k->k.p, r->k->k.p);
+ bpos_cmp(l->k->k.p, r->k->k.p);
}
static int bch2_journal_replay(struct bch_fs *c,