&X_SEARCH(acl_to_xattr_type(type), "", 0),
0);
if (ret) {
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (ret != -ENOENT)
acl = ERR_PTR(ret);
btree_err:
bch2_trans_iter_exit(&trans, &inode_iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (unlikely(ret))
goto err;
GFP_KERNEL);
*discard_pos_done = iter.pos;
- ret = bch2_trans_relock(trans) ? 0 : -EINTR;
+ ret = bch2_trans_relock(trans);
if (ret)
goto out;
}
for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
alloc_cursor < k.k->p.offset;
alloc_cursor++) {
- if (btree_trans_too_many_iters(trans)) {
- ob = ERR_PTR(-EINTR);
+ ret = btree_trans_too_many_iters(trans);
+ if (ret) {
+ ob = ERR_PTR(ret);
break;
}
break;
}
}
- if (ob)
+
+ if (ob || ret)
break;
}
bch2_trans_iter_exit(trans, &iter);
ret = PTR_ERR_OR_ZERO(ob);
if (ret) {
- if (ret == -EINTR || cl)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
break;
continue;
}
target, erasure_code,
nr_replicas, nr_effective,
have_cache, flags, _cl);
- if (ret == -EINTR ||
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
return ret;
nr_replicas, nr_effective, have_cache,
reserve, flags, cl);
if (ret &&
- ret != -EINTR &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
!bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
!cl && _cl) {
cl = _cl;
nr_replicas, &nr_effective,
&have_cache, reserve,
ob_flags, NULL);
- if (!ret || ret == -EINTR)
+ if (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto alloc_done;
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
+#include "errcode.h"
#include "error.h"
#include "trace.h"
if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
trace_trans_restart_relock_parent_for_fill(trans->fn,
_THIS_IP_, btree_id, &path->pos);
- btree_trans_restart(trans);
- return ERR_PTR(-EINTR);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
}
b = bch2_btree_node_mem_alloc(c, level != 0);
trans->memory_allocation_failure = true;
trace_trans_restart_memory_allocation_failure(trans->fn,
_THIS_IP_, btree_id, &path->pos);
- btree_trans_restart(trans);
- return ERR_PTR(-EINTR);
+
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
}
if (IS_ERR(b))
if (!sync)
return NULL;
- if (trans &&
- (!bch2_trans_relock(trans) ||
- !bch2_btree_path_relock_intent(trans, path))) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(-EINTR);
+ if (trans) {
+ int ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(ret);
+ }
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
btree_id, &path->pos);
- btree_trans_restart(trans);
- return ERR_PTR(-EINTR);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
}
return b;
struct btree *b = container_of(lock, struct btree, c.lock);
const struct bkey_i *k = p;
- return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
+ if (b->hash_val != btree_ptr_hash_val(k))
+ return BCH_ERR_lock_fail_node_reused;
+ return 0;
}
static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
+ int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (btree_node_read_locked(path, level + 1))
btree_node_unlock(trans, path, level + 1);
- if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
- lock_node_check_fn, (void *) k, trace_ip)) {
- if (!trans->restarted)
+ ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type,
+ lock_node_check_fn, (void *) k, trace_ip);
+ if (unlikely(ret)) {
+ if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
goto retry;
- return ERR_PTR(-EINTR);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+ BUG();
}
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
trace_ip,
path->btree_id,
&path->pos);
- btree_trans_restart(trans);
- return ERR_PTR(-EINTR);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
}
}
* should_be_locked is not set on this path yet, so we need to
* relock it specifically:
*/
- if (trans &&
- (!bch2_trans_relock(trans) ||
- !bch2_btree_path_relock_intent(trans, path))) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(-EINTR);
+ if (trans) {
+ int ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(ret);
+ }
}
if (!six_relock_type(&b->c.lock, lock_type, seq))
if (need_resched() || race_fault()) {
bch2_trans_unlock(trans);
schedule();
- return bch2_trans_relock(trans) ? 0 : -EINTR;
+ return bch2_trans_relock(trans);
} else {
return 0;
}
}
/* Slowpath: */
-bool __bch2_btree_node_lock(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct bpos pos, unsigned level,
- enum six_lock_type type,
- six_lock_should_sleep_fn should_sleep_fn, void *p,
- unsigned long ip)
+int __bch2_btree_node_lock(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct bpos pos, unsigned level,
+ enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p,
+ unsigned long ip)
{
struct btree_path *linked;
unsigned reason;
path->btree_id,
path->cached,
&pos);
- btree_trans_restart(trans);
- return false;
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
}
/* Btree iterator locking: */
/*
* Only for btree_cache.c - only relocks intent locks
*/
-bool bch2_btree_path_relock_intent(struct btree_trans *trans,
- struct btree_path *path)
+int bch2_btree_path_relock_intent(struct btree_trans *trans,
+ struct btree_path *path)
{
unsigned l;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
path->btree_id, &path->pos);
- btree_trans_restart(trans);
- return false;
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
}
}
- return true;
+ return 0;
}
noinline __flatten
-static bool __bch2_btree_path_relock(struct btree_trans *trans,
+static int __bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
bool ret = btree_path_get_locks(trans, path, false);
if (!ret) {
trace_trans_restart_relock_path(trans->fn, trace_ip,
path->btree_id, &path->pos);
- btree_trans_restart(trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
}
- return ret;
+
+ return 0;
}
-static inline bool bch2_btree_path_relock(struct btree_trans *trans,
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
return btree_node_locked(path, path->level)
- ? true
+ ? 0
: __bch2_btree_path_relock(trans, path, trace_ip);
}
/* Btree transaction locking: */
-bool bch2_trans_relock(struct btree_trans *trans)
+int bch2_trans_relock(struct btree_trans *trans)
{
struct btree_path *path;
if (unlikely(trans->restarted))
- return false;
+ return -BCH_ERR_transaction_restart_relock;
trans_for_each_path(trans, path)
if (path->should_be_locked &&
- !bch2_btree_path_relock(trans, path, _RET_IP_)) {
+ bch2_btree_path_relock(trans, path, _RET_IP_)) {
trace_trans_restart_relock(trans->fn, _RET_IP_,
path->btree_id, &path->pos);
BUG_ON(!trans->restarted);
- return false;
+ return -BCH_ERR_transaction_restart_relock;
}
- return true;
+ return 0;
}
void bch2_trans_unlock(struct btree_trans *trans)
struct btree *b = container_of(lock, struct btree, c.lock);
struct btree **rootp = p;
- return b == *rootp ? 0 : -1;
+ if (b != *rootp)
+ return BCH_ERR_lock_fail_root_changed;
+ return 0;
}
static inline int btree_path_lock_root(struct btree_trans *trans,
struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
enum six_lock_type lock_type;
unsigned i;
+ int ret;
EBUG_ON(path->nodes_locked);
}
lock_type = __btree_lock_want(path, path->level);
- if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
- path->level, lock_type,
- lock_root_check_fn, rootp,
- trace_ip))) {
- if (trans->restarted)
- return -EINTR;
- continue;
+ ret = btree_node_lock(trans, path, b, SPOS_MAX,
+ path->level, lock_type,
+ lock_root_check_fn, rootp,
+ trace_ip);
+ if (unlikely(ret)) {
+ if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
+ continue;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+ BUG();
}
if (likely(b == READ_ONCE(*rootp) &&
int i, ret = 0;
if (trans->in_traverse_all)
- return -EINTR;
+ return -BCH_ERR_transaction_restart_in_traverse_all;
trans->in_traverse_all = true;
retry_all:
prev = NULL;
- trans->restarted = false;
+ trans->restarted = 0;
trans_for_each_path(trans, path)
path->should_be_locked = false;
*/
if (path->uptodate) {
ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
- if (ret == -EINTR || ret == -ENOMEM)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ ret == -ENOMEM)
goto retry_all;
if (ret)
goto err;
unsigned long trace_ip)
{
unsigned depth_want = path->level;
- int ret = 0;
+ int ret = -((int) trans->restarted);
- if (unlikely(trans->restarted)) {
- ret = -EINTR;
+ if (unlikely(ret))
goto out;
- }
/*
* Ensure we obey path->should_be_locked: if it's set, we can't unlock
* and re-traverse the path without a transaction restart:
*/
if (path->should_be_locked) {
- ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
+ ret = bch2_btree_path_relock(trans, path, trace_ip);
goto out;
}
path->uptodate = BTREE_ITER_UPTODATE;
out:
- BUG_ON((ret == -EINTR) != !!trans->restarted);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
bch2_btree_path_verify(trans, path);
return ret;
}
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
path->btree_id, &path->pos);
- btree_trans_restart(trans);
- ret = -EINTR;
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
goto err;
}
BUG_ON(!iter->path->nodes_locked);
out:
if (iter->update_path) {
- if (unlikely(!bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_))) {
- k = bkey_s_c_err(-EINTR);
+ ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+ if (unlikely(ret)) {
+ k = bkey_s_c_err(ret);
} else {
BUG_ON(!(iter->update_path->nodes_locked & 1));
iter->update_path->should_be_locked = true;
if (old_bytes) {
trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
- btree_trans_restart(trans);
- return ERR_PTR(-EINTR);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
}
}
* bch2_trans_begin() - reset a transaction after a interrupted attempt
* @trans: transaction to reset
*
- * While iterating over nodes or updating nodes a attempt to lock a btree
- * node may return EINTR when the trylock fails. When this occurs
- * bch2_trans_begin() should be called and the transaction retried.
+ * While iterating over nodes or updating nodes an attempt to lock a btree node
+ * may return BCH_ERR_transaction_restart when the trylock fails. When this
+ * occurs bch2_trans_begin() should be called and the transaction retried.
*/
u32 bch2_trans_begin(struct btree_trans *trans)
{
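For context, the retry protocol this comment describes reduces to the following sketch (do_update() stands in for a hypothetical iterate/update body; the helpers are the ones this patch introduces):

	int example(struct btree_trans *trans)
	{
		int ret;
	retry:
		bch2_trans_begin(trans);

		ret = do_update(trans);	/* hypothetical transaction body */
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;
		return ret;
	}

This is exactly the loop the lockrestart_do() macro below expands to.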
struct btree *, struct btree_node_iter *,
struct bkey_packed *, unsigned, unsigned);
-bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
+int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
-bool bch2_trans_relock(struct btree_trans *);
+int bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
-static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
+static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
- return restart_count != trans->restart_count ? -EINTR : 0;
+ return restart_count != trans->restart_count;
}
void bch2_trans_verify_not_restarted(struct btree_trans *, u32);
__always_inline
-static inline int btree_trans_restart(struct btree_trans *trans)
+static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
- trans->restarted = true;
+ BUG_ON(err <= 0);
+ BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+
+ trans->restarted = err;
trans->restart_count++;
- bch2_trans_unlock(trans);
- return -EINTR;
+ return -err;
+}
+
+__always_inline
+static inline int btree_trans_restart(struct btree_trans *trans, int err)
+{
+ btree_trans_restart_nounlock(trans, err);
+ bch2_trans_unlock(trans);
+ return -err;
}
bool bch2_btree_node_upgrade(struct btree_trans *,
struct btree *b;
while (b = bch2_btree_iter_peek_node(iter),
- PTR_ERR_OR_ZERO(b) == -EINTR)
+ bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
bch2_trans_begin(trans);
return b;
{
if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2) {
trace_trans_restart_too_many_iters(trans->fn, _THIS_IP_);
- return btree_trans_restart(trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}
return 0;
while (btree_trans_too_many_iters(trans) ||
(k = bch2_btree_iter_peek_type(iter, flags),
- bkey_err(k) == -EINTR))
+ bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
bch2_trans_begin(trans);
return k;
do { \
bch2_trans_begin(_trans); \
_ret = (_do); \
- } while (_ret == -EINTR); \
+ } while (bch2_err_matches(_ret, BCH_ERR_transaction_restart)); \
\
_ret; \
})
* These are like lockrestart_do() and commit_do(), with two differences:
*
* - We don't call bch2_trans_begin() unless we had a transaction restart
- * - We return -EINTR if we succeeded after a transaction restart
+ * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
+ * transaction restart
*/
#define nested_lockrestart_do(_trans, _do) \
({ \
\
_restart_count = _orig_restart_count = (_trans)->restart_count; \
\
- while ((_ret = (_do)) == -EINTR) \
+ while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
_restart_count = bch2_trans_begin(_trans); \
\
if (!_ret) \
bch2_trans_verify_not_restarted(_trans, _restart_count);\
\
- _ret ?: trans_was_restarted(_trans, _orig_restart_count); \
+ if (!_ret && trans_was_restarted(_trans, _orig_restart_count)) \
+ _ret = -BCH_ERR_transaction_restart_nested; \
+ \
+ _ret; \
})
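To illustrate the convention described above, a minimal sketch assuming a hypothetical inner_work() helper: even when the inner loop ultimately succeeds, a restart along the way surfaces as -BCH_ERR_transaction_restart_nested, which still matches the BCH_ERR_transaction_restart class, so an outer retry loop restarts as well (its iterators may have been invalidated by the inner restart):

	static int outer_work(struct btree_trans *trans)
	{
		/*
		 * Propagate the result as-is: the nested code matches
		 * BCH_ERR_transaction_restart, so lockrestart_do() or
		 * for_each_btree_key2() in our caller will call
		 * bch2_trans_begin() and retry from the top.
		 */
		return nested_lockrestart_do(trans, inner_work(trans));
	}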
#define for_each_btree_key2(_trans, _iter, _btree_id, \
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
\
- do { \
+ while (1) { \
bch2_trans_begin(_trans); \
(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags)); \
if (!(_k).k) { \
} \
\
_ret = bkey_err(_k) ?: (_do); \
- if (!_ret) \
- bch2_btree_iter_advance(&(_iter)); \
- } while (_ret == 0 || _ret == -EINTR); \
+ if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+ continue; \
+ if (_ret) \
+ break; \
+ bch2_btree_iter_advance(&(_iter)); \
+ } \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret; \
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
+#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
if (!bch2_btree_node_relock(trans, ck_path, 0)) {
trace_trans_restart_relock_key_cache_fill(trans->fn,
_THIS_IP_, ck_path->btree_id, &ck_path->pos);
- ret = btree_trans_restart(trans);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
goto err;
}
struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
const struct btree_path *path = p;
- return ck->key.btree_id == path->btree_id &&
- !bpos_cmp(ck->key.pos, path->pos) ? 0 : -1;
+ if (ck->key.btree_id != path->btree_id ||
+ bpos_cmp(ck->key.pos, path->pos))
+ return BCH_ERR_lock_fail_node_reused;
+ return 0;
}
__flatten
} else {
enum six_lock_type lock_want = __btree_lock_want(path, 0);
- if (!btree_node_lock(trans, path, (void *) ck, path->pos, 0,
- lock_want,
- bkey_cached_check_fn, path, _THIS_IP_)) {
- if (!trans->restarted)
+ ret = btree_node_lock(trans, path, (void *) ck, path->pos, 0,
+ lock_want,
+ bkey_cached_check_fn, path, _THIS_IP_);
+ if (ret) {
+ if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
goto retry;
-
- ret = -EINTR;
- goto err;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+ BUG();
}
if (ck->key.btree_id != path->btree_id ||
if (!path->locks_want &&
!__bch2_btree_path_upgrade(trans, path, 1)) {
trace_transaction_restart_ip(trans->fn, _THIS_IP_);
- ret = btree_trans_restart(trans);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
goto err;
}
return ret;
err:
- if (ret != -EINTR) {
+ if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
btree_node_unlock(trans, path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
}
? JOURNAL_WATERMARK_reserved
: 0)|
commit_flags);
- if (ret) {
- bch2_fs_fatal_err_on(ret != -EINTR &&
- ret != -EAGAIN &&
- !bch2_journal_error(j), c,
- "error flushing key cache: %i", ret);
+
+ bch2_fs_fatal_err_on(ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+ !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
+ !bch2_journal_error(j), c,
+ "error flushing key cache: %s", bch2_err_str(ret));
+ if (ret)
goto out;
- }
bch2_journal_pin_drop(j, &ck->journal);
bch2_journal_preres_put(j, &ck->res);
}
}
-static inline bool btree_node_lock_type(struct btree_trans *trans,
+static inline int btree_node_lock_type(struct btree_trans *trans,
struct btree_path *path,
struct btree *b,
struct bpos pos, unsigned level,
{
struct bch_fs *c = trans->c;
u64 start_time;
- bool ret;
+ int ret;
if (six_trylock_type(&b->c.lock, type))
- return true;
+ return 0;
start_time = local_clock();
trans->locking_level = level;
trans->locking_lock_type = type;
trans->locking = b;
- ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
+ ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
trans->locking = NULL;
if (ret)
- bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+ return ret;
- return ret;
+ bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+ return 0;
}
/*
return false;
}
-bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
- struct btree *, struct bpos, unsigned,
- enum six_lock_type,
- six_lock_should_sleep_fn, void *,
- unsigned long);
+int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
+ struct btree *, struct bpos, unsigned,
+ enum six_lock_type,
+ six_lock_should_sleep_fn, void *,
+ unsigned long);
-static inline bool btree_node_lock(struct btree_trans *trans,
+static inline int btree_node_lock(struct btree_trans *trans,
struct btree_path *path,
struct btree *b, struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
+ int ret = 0;
+
EBUG_ON(level >= BTREE_MAX_DEPTH);
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->c.lock, type)) ||
- btree_node_lock_increment(trans, b, level, type) ||
- __bch2_btree_node_lock(trans, path, b, pos, level, type,
- should_sleep_fn, p, ip)) {
+ btree_node_lock_increment(trans, b, level, type) ||
+ !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
+ should_sleep_fn, p, ip))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->c.level].lock_taken_time = ktime_get_ns();
#endif
- return true;
- } else {
- return false;
}
+
+ return ret;
}
bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
u8 nr_updates;
bool used_mempool:1;
bool in_traverse_all:1;
- bool restarted:1;
bool paths_sorted:1;
bool memory_allocation_failure:1;
bool journal_transaction_names:1;
bool journal_replay_not_finished:1;
+ enum bch_errcode restarted:16;
u32 restart_count;
unsigned long last_restarted_ip;
* This is main entry point for btree updates.
*
* Return values:
- * -EINTR: locking changed, this function should be called again.
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
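With -EINTR gone from this contract, a typical call site no longer compares error values directly; restarts are consumed by the retry wrappers and only hard errors reach the caller. A sketch, assuming the commit_do() signature referenced earlier in this patch and a hypothetical do_update():

	ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
			do_update(trans));
	/* ret is now 0, -EROFS, -EIO, ... -- never a restart code */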
if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
trace_trans_restart_iter_upgrade(trans->fn, _RET_IP_,
path->btree_id, &path->pos);
- ret = btree_trans_restart(trans);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
return ERR_PTR(ret);
}
else if (!down_read_trylock(&c->gc_lock)) {
bch2_trans_unlock(trans);
down_read(&c->gc_lock);
- if (!bch2_trans_relock(trans)) {
+ ret = bch2_trans_relock(trans);
+ if (ret) {
up_read(&c->gc_lock);
- return ERR_PTR(-EINTR);
+ return ERR_PTR(ret);
}
}
journal_flags);
if (ret) {
trace_trans_restart_journal_preres_get(trans->fn, _RET_IP_);
- btree_trans_restart(trans);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
goto err;
}
goto err;
}
- if (!bch2_trans_relock(trans)) {
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
goto err;
- }
return as;
err:
int ret = 0;
if (!btree_node_intent_locked(path, b->c.level) &&
- !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
- btree_trans_restart(trans);
- return -EINTR;
- }
+ !bch2_btree_path_upgrade(trans, path, b->c.level + 1))
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
closure_init_stack(&cl);
if (ret) {
bch2_trans_unlock(trans);
closure_sync(&cl);
- if (!bch2_trans_relock(trans))
- return -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ return ret;
}
new_hash = bch2_btree_node_mem_alloc(c, false);
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
+#include "errcode.h"
#include "error.h"
#include "extent_update.h"
#include "journal.h"
if (ret)
return ret;
- if (!bch2_trans_relock(trans)) {
+ ret = bch2_trans_relock(trans);
+ if (ret) {
trace_trans_restart_journal_preres_get(trans->fn, trace_ip);
- return -EINTR;
+ return ret;
}
return 0;
trace_trans_restart_key_cache_key_realloced(trans->fn, _RET_IP_,
path->btree_id, &path->pos,
old_u64s, new_u64s);
- /*
- * Not using btree_trans_restart() because we can't unlock here, we have
- * write locks held:
- */
- trans->restarted = true;
- return -EINTR;
+ return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
}
/* Triggers: */
if (race_fault()) {
trace_trans_restart_fault_inject(trans->fn, trace_ip);
- trans->restarted = true;
- return -EINTR;
+ return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
}
/*
static inline int trans_lock_write(struct btree_trans *trans)
{
struct btree_insert_entry *i;
+ int ret;
trans_for_each_update(trans, i) {
if (same_leaf_as_prev(trans, i))
if (have_conflicting_read_lock(trans, i->path))
goto fail;
- btree_node_lock_type(trans, i->path,
+ ret = btree_node_lock_type(trans, i->path,
insert_l(i)->b,
i->path->pos, i->level,
SIX_LOCK_write, NULL, NULL);
+ BUG_ON(ret);
}
bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
}
trace_trans_restart_would_deadlock_write(trans->fn);
- return btree_trans_restart(trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}
static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
switch (ret) {
case BTREE_INSERT_BTREE_NODE_FULL:
ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
- if (!ret)
- return 0;
-
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
trace_trans_restart_btree_node_split(trans->fn, trace_ip,
i->btree_id, &i->path->pos);
break;
if (ret)
break;
- if (bch2_trans_relock(trans))
- return 0;
-
- trace_trans_restart_mark_replicas(trans->fn, trace_ip);
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ trace_trans_restart_mark_replicas(trans->fn, trace_ip);
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
bch2_trans_unlock(trans);
if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
!(trans->flags & JOURNAL_WATERMARK_reserved)) {
- trans->restarted = true;
- ret = -EAGAIN;
+ ret = -BCH_ERR_journal_reclaim_would_deadlock;
break;
}
if (ret)
break;
- if (bch2_trans_relock(trans))
- return 0;
-
- trace_trans_restart_journal_res_get(trans->fn, trace_ip);
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ trace_trans_restart_journal_res_get(trans->fn, trace_ip);
break;
case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
bch2_trans_unlock(trans);
if (ret < 0)
break;
- if (bch2_trans_relock(trans))
- return 0;
-
- trace_trans_restart_journal_reclaim(trans->fn, trace_ip);
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ trace_trans_restart_journal_reclaim(trans->fn, trace_ip);
break;
default:
BUG_ON(ret >= 0);
break;
}
- BUG_ON((ret == EINTR || ret == -EAGAIN) && !trans->restarted);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
BUG_ON(ret == -ENOSPC &&
!(trans->flags & BTREE_INSERT_NOWAIT) &&
(trans->flags & BTREE_INSERT_NOFAIL));
bch2_trans_unlock(trans);
- ret = bch2_fs_read_write_early(c);
+ ret = bch2_fs_read_write_early(c) ?:
+ bch2_trans_relock(trans);
if (ret)
return ret;
- if (!bch2_trans_relock(trans))
- return -EINTR;
-
percpu_ref_get(&c->writes);
return 0;
}
if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
trace_trans_restart_upgrade(trans->fn, _RET_IP_,
i->btree_id, &i->path->pos);
- ret = btree_trans_restart(trans);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
goto out;
}
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
trace_trans_restart_key_cache_raced(trans->fn, _RET_IP_);
- btree_trans_restart(trans);
- return -EINTR;
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
}
iter->key_cache_path->should_be_locked = true;
break;
}
- if (ret == -EINTR) {
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
ret = 0;
goto retry;
}
bch2_ob_add_backpointer(c, ec_ob, &insert->k);
}
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
if (ret)
break;
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
- BUG_ON(ret == -EINTR);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
}
ret = __bch2_dirent_lookup_trans(&trans, &iter, dir, hash_info,
name, inum, 0);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (!ret)
bch2_trans_iter_exit(&trans, &iter);
}
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
struct btree_iter *iter)
{
size_t idx = iter->pos.offset;
- int ret = 0;
if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
- return ret;
+ return 0;
bch2_trans_unlock(trans);
- ret = -EINTR;
- if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
- return ret;
-
- return -ENOMEM;
+ return __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
+ bch2_trans_relock(trans);
}
static ssize_t stripe_idx_to_delete(struct bch_fs *c)
x(0, freelist_empty) \
x(freelist_empty, no_buckets_found) \
x(0, insufficient_devices) \
- x(0, need_snapshot_cleanup)
+ x(0, need_snapshot_cleanup) \
+ x(0, transaction_restart) \
+ x(transaction_restart, transaction_restart_fault_inject) \
+ x(transaction_restart, transaction_restart_relock) \
+ x(transaction_restart, transaction_restart_relock_path) \
+ x(transaction_restart, transaction_restart_relock_path_intent) \
+ x(transaction_restart, transaction_restart_relock_after_fill) \
+ x(transaction_restart, transaction_restart_too_many_iters) \
+ x(transaction_restart, transaction_restart_lock_node_reused) \
+ x(transaction_restart, transaction_restart_fill_relock) \
+ x(transaction_restart, transaction_restart_fill_mem_alloc_fail)\
+ x(transaction_restart, transaction_restart_mem_realloced) \
+ x(transaction_restart, transaction_restart_in_traverse_all) \
+ x(transaction_restart, transaction_restart_would_deadlock) \
+ x(transaction_restart, transaction_restart_would_deadlock_write)\
+ x(transaction_restart, transaction_restart_upgrade) \
+ x(transaction_restart, transaction_restart_key_cache_fill) \
+ x(transaction_restart, transaction_restart_key_cache_raced) \
+ x(transaction_restart, transaction_restart_key_cache_realloced)\
+ x(transaction_restart, transaction_restart_journal_preres_get) \
+ x(transaction_restart, transaction_restart_nested) \
+ x(0, lock_fail_node_reused) \
+ x(0, lock_fail_root_changed) \
+ x(0, journal_reclaim_would_deadlock)
enum bch_errcode {
BCH_ERR_START = 2048,
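The codes above form a tree through the x(class, name) macro: each transaction_restart_* entry names transaction_restart as its parent, and top-level codes use 0. bch2_err_matches() presumably just walks parent links until it reaches the class or leaves the private range; a sketch of that shape (the real table lives in errcode.c):

	#define BCH_ERR_0	0	/* so x(0, ...) pastes to a real parent */

	static unsigned bch2_errcode_parents[] = {
	#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = BCH_ERR_##class,
		BCH_ERRCODES()
	#undef x
	};

	bool __bch2_err_matches(int err, int class)
	{
		err	= abs(err);
		class	= abs(class);

		while (err >= BCH_ERR_START && err != class)
			err = bch2_errcode_parents[err - BCH_ERR_START];

		return err == class;
	}

Hence bch2_err_matches(-BCH_ERR_transaction_restart_upgrade, BCH_ERR_transaction_restart) is true, while a plain -EINTR matches nothing.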
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
* read_extent -> io_time_reset may cause a transaction restart
* without returning an error, we need to check for that here:
*/
- if (!bch2_trans_relock(trans)) {
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
break;
- }
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
err:
bch2_trans_iter_exit(trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (ret) {
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (err == -EINTR)
+ if (bch2_err_matches(err, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
start = iter.pos;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
bch2_trans_copy_iter(&dst, &src);
bch2_trans_copy_iter(&del, &src);
- while (ret == 0 || ret == -EINTR) {
+ while (ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
bkey_err:
bch2_quota_reservation_put(c, inode, "a_res);
bch2_disk_reservation_put(c, &disk_res);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
}
}
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
}
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
bch2_trans_iter_exit(&trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
KEY_TYPE_QUOTA_WARN);
err_before_quota:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
goto err_trans;
}
btree_err:
bch2_trans_iter_exit(&trans, &inode_iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (unlikely(ret))
goto err_trans;
start = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (!ret && have_extent)
memcpy(name, d.v->d_name, name_len);
name[name_len] = '\0';
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_iter_exit(&trans, &iter1);
ret = bch2_inode_unpack(k, inode);
err:
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(trans->c, "error fetching inode %llu: %s",
inode_nr, bch2_err_str(ret));
bch2_trans_iter_exit(trans, &iter);
if (!ret)
*snapshot = iter.pos.snapshot;
err:
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(trans->c, "error fetching inode %llu:%u: %s",
inode_nr, *snapshot, bch2_err_str(ret));
bch2_trans_iter_exit(trans, &iter);
BTREE_INSERT_NOFAIL);
err:
bch2_trans_iter_exit(trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
return ret;
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_trans_iter_exit(trans, &iter);
err:
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error from __remove_dirent(): %s", bch2_err_str(ret));
return ret;
}
goto create_lostfound;
}
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error looking up lost+found: %s", bch2_err_str(ret));
if (ret)
return ret;
lostfound, &lostfound_str,
0, 0, S_IFDIR|0700, 0, NULL, NULL,
(subvol_inum) { }, 0);
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error creating lost+found: %s", bch2_err_str(ret));
return ret;
}
ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
if (ret) {
- bch_err(c, "hash_redo_key err %i", ret);
+ bch_err(c, "hash_redo_key err %s", bch2_err_str(ret));
return ret;
}
- ret = -EINTR;
+ ret = -BCH_ERR_transaction_restart_nested;
fsck_err:
goto out;
}
ret = write_inode(trans, &i->inode, i->snapshot);
if (ret)
break;
- ret2 = -EINTR;
+ ret2 = -BCH_ERR_transaction_restart_nested;
}
fsck_err:
if (ret)
* it shouldn't be but we need to fix the new i_sectors check
* code and delete the old bch2_count_inode_sectors() first
*/
- return -EINTR;
+ return -BCH_ERR_transaction_restart_nested;
}
#if 0
if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
bch2_bkey_val_to_text(&PBUF(buf2), c, k);
if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2)) {
- ret = fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
+ ret = fix_overlapping_extent(trans, k, prev.k->k.p)
+ ?: -BCH_ERR_transaction_restart_nested;
goto out;
}
}
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from check_extent()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "error from check_extent(): %s", bch2_err_str(ret));
return ret;
}
ret = write_inode(trans, &i->inode, i->snapshot);
if (ret)
break;
- ret2 = -EINTR;
+ ret2 = -BCH_ERR_transaction_restart_nested;
}
}
fsck_err:
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error from check_target(): %s", bch2_err_str(ret));
return ret;
}
if (!iter->path->should_be_locked) {
/* hack: see check_extent() */
- return -EINTR;
+ return -BCH_ERR_transaction_restart_nested;
}
ret = __walk_inode(trans, dir, equiv);
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error from check_dirent(): %s", bch2_err_str(ret));
return ret;
}
ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error from check_xattr(): %s", bch2_err_str(ret));
return ret;
}
}
bch2_trans_iter_exit(&trans, &iter);
- BUG_ON(ret == -EINTR);
-
darray_exit(&path);
bch2_trans_exit(&trans);
bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
err:
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
BTREE_INSERT_NOFAIL);
err:
bch2_trans_iter_exit(&trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
}
/*
- * Returns -EINTR if we had to drop locks:
+ * Returns -BCH_ERR_transaction_restart if we had to drop locks:
*/
int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
subvol_inum inum, u64 end,
int ret = 0, ret2 = 0;
u32 snapshot;
- while (!ret || ret == -EINTR) {
+ while (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
- return ret == -EINTR ? 0 : ret;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+
+ return ret;
}
static int bch2_write_index_default(struct bch_write_op *op)
ret = bch2_subvolume_get_snapshot(&trans, inum.subvol,
&sk.k->k.p.snapshot);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
op->flags & BCH_WRITE_CHECK_ENOSPC);
bch2_trans_iter_exit(&trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
? bch2_write_index_default(op)
: bch2_data_update_index_update(op);
- BUG_ON(ret == -EINTR);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
BUG_ON(keylist_sectors(keys) && !ret);
op->written += sectors_start - keylist_sectors(keys);
* read_extent -> io_time_reset may cause a transaction restart
* without returning an error, we need to check for that here:
*/
- if (!bch2_trans_relock(&trans)) {
- ret = -EINTR;
+ ret = bch2_trans_relock(&trans);
+ if (ret)
break;
- }
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, bvec_iter.bi_sector));
err:
bch2_trans_iter_exit(&trans, &iter);
- if (ret == -EINTR || ret == READ_RETRY || ret == READ_RETRY_AVOID)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ ret == READ_RETRY ||
+ ret == READ_RETRY_AVOID)
goto retry;
bch2_trans_exit(&trans);
!test_bit(BCH_FS_STOPPING, &c->flags))
b = bch2_btree_iter_next_node(&iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_iter_exit(&trans, &iter);
}
ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false);
- if (ret == -EINTR) {
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
ret = 0;
continue;
}
next:
bch2_btree_iter_next_node(&iter);
}
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&k, c);
- BUG_ON(ret == -EINTR);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
}
break;
ret = bkey_err(k);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
ret = lookup_inode(&trans,
SPOS(0, k.k->p.inode, k.k->p.snapshot),
&inode);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (!ret)
ret2 = bch2_move_extent(&trans, ctxt, io_opts,
btree_id, k, data_opts);
if (ret2) {
- if (ret2 == -EINTR)
+ if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
if (ret2 == -ENOMEM) {
goto next;
ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
next:
bch2_btree_iter_next_node(&iter);
}
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_iter_init(&trans, &dst_iter, BTREE_ID_extents, dst_start,
BTREE_ITER_INTENT);
- while ((ret == 0 || ret == -EINTR) &&
+ while ((ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
bkey_cmp(dst_iter.pos, dst_end) < 0) {
struct disk_reservation disk_res = { 0 };
}
bch2_trans_iter_exit(&trans, &inode_iter);
- } while (ret2 == -EINTR);
+ } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&new_src, c);
if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
ret = bch2_subvolume_delete(trans, iter->pos.offset);
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(trans->c, "error deleting subvolume %llu: %s",
iter->pos.offset, bch2_err_str(ret));
if (ret)
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
- if (ret && ret != -EINTR)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
if (ret)
return ret;
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);