/* We force new_pushed to 1 to force the identifier in the update message */
new_pushed = 1;
- HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
while (1) {
struct stksess *ts;
/* ... pick the next pending update from the tree, setting <ts> and <updateid> ... */
HA_ATOMIC_INC(&ts->ref_cnt);
- HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
ret = peer_send_updatemsg(st, appctx, ts, updateid, new_pushed, use_timed);
- HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
HA_ATOMIC_DEC(&ts->ref_cnt);
if (ret <= 0)
break;
}
out:
- HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
return ret;
}
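
The hunk above is the heart of the teach loop: the entry is pinned with an
atomic ref count so the updates read lock can be dropped for the duration of
peer_send_updatemsg(), which may block on the socket. A minimal standalone
sketch of that pin/unlock/send/relock pattern, using plain pthreads in place
of the HA_RWLOCK_* wrappers (entry, send_one and push_one are hypothetical
names, not HAProxy ones):

    #include <pthread.h>
    #include <stdatomic.h>

    struct entry {
        atomic_uint ref_cnt;            /* pins the entry while unlocked */
        int payload;
    };

    static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* hypothetical stand-in for peer_send_updatemsg(): may block on the
     * socket, so it must run with no lock held */
    static int send_one(struct entry *e)
    {
        return e->payload >= 0 ? 1 : -1;
    }

    static int push_one(struct entry *e)
    {
        int ret;

        pthread_rwlock_rdlock(&updt_lock);
        /* pin <e>: a writer seeing ref_cnt != 0 must not free it */
        atomic_fetch_add(&e->ref_cnt, 1);
        pthread_rwlock_unlock(&updt_lock);

        ret = send_one(e);              /* slow path runs unlocked */

        pthread_rwlock_rdlock(&updt_lock);
        atomic_fetch_sub(&e->ref_cnt, 1);
        pthread_rwlock_unlock(&updt_lock);
        return ret;
    }

The ref count is dropped only after re-taking the read lock, mirroring the
hunk: entries are only reclaimed by writers holding updt_lock for writing,
which is exactly what the ref_cnt check in the kill path below relies on.
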
if (!(peer->flags & PEER_F_TEACH_PROCESS)) {
int must_send;
- HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
must_send = (peer->learnstate == PEER_LR_ST_NOTASSIGNED) && (st->last_pushed != st->table->localupdate);
- HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
if (must_send) {
repl = peer_send_teach_process_msgs(appctx, peer, st);
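
must_send is evaluated inside a short read-locked section and only the
boolean survives it; the potentially long teach run then happens unlocked.
A tiny sketch of that snapshot-then-act shape (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned last_pushed, localupdate;

    /* hypothetical stand-in for peer_send_teach_process_msgs() */
    static void teach(void) { }

    static void maybe_teach(void)
    {
        bool must_send;

        /* read both counters under one lock, keep only the verdict */
        pthread_rwlock_rdlock(&updt_lock);
        must_send = (last_pushed != localupdate);
        pthread_rwlock_unlock(&updt_lock);

        if (must_send)
            teach();        /* long-running work stays outside the lock */
    }
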
uint updateid, commitid;
st->last_get = st->last_acked = 0;
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
/* if st->update appears to be in the future, it means that the
 * last acked value is very old and that we stayed disconnected
 * for too long to make use of this acknowledgement.
 */
__ha_cpu_relax();
}
- HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_UPDT_LOCK, &st->table->updt_lock);
}
/* Awake main task to ack the new peer state */
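
The comment above about st->update "appearing to be in the future" relies
on the update ids being 32-bit wrapping counters: ordering cannot be tested
with a plain <, only with a signed distance. A self-contained illustration
of that wrap-safe test (update_newer is a hypothetical name; the signed-cast
idiom itself is the standard one):

    #include <stdint.h>
    #include <assert.h>

    /* wrap-safe: is <a> strictly newer than <b> on a 32-bit circle? */
    static inline int update_newer(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        assert(update_newer(1, 0));
        assert(update_newer(0, 0xfffffffe));  /* 0 lies just past the wrap */
        assert(!update_newer(0, 0x7fffffff)); /* over half a turn away */
        return 0;
    }
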
if (ts->upd.node.leaf_p) {
updt_locked = 1;
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
if (HA_ATOMIC_LOAD(&ts->ref_cnt))
goto out_unlock;
}
out_unlock:
if (updt_locked)
- HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
return 1;
}
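
This is the writer side of the ref_cnt protocol from the teach loop: an
entry may only be torn down with updt_lock held for writing and with no
reader pinning it, otherwise a peer that dropped the lock mid-send would
come back to a freed session. A sketch of that guarded-release shape (names
are hypothetical; free() stands in for the eb-tree removal and session
release done by the real code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct entry {
        atomic_uint ref_cnt;
        int linked;             /* stand-in for upd.node.leaf_p != NULL */
    };

    static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* returns 1 if <e> was released, 0 if a reader still pins it */
    static int try_release(struct entry *e)
    {
        int released = 0;

        if (e->linked) {
            pthread_rwlock_wrlock(&updt_lock);
            /* a non-zero ref_cnt means a peer is sending this entry
             * with the lock dropped; it will come back, so keep it */
            if (atomic_load(&e->ref_cnt) == 0) {
                e->linked = 0;  /* unlink: new readers can't find it */
                released = 1;
            }
            pthread_rwlock_unlock(&updt_lock);
        }
        if (released)
            free(e);
        return released;
    }
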
if (ts->upd.node.leaf_p) {
if (!updt_locked) {
updt_locked = 1;
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
}
/* now we're locked, new peers can't grab it anymore,
 * existing ones already have the ref_cnt.
 */
}
if (updt_locked)
- HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
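
Both kill-path hunks take the updt_lock write lock lazily: only if the node
is actually linked into the update tree, remembering the fact in
updt_locked so a single conditional unlock covers every exit. A compact
sketch of that flag-guarded locking (same pthread stand-ins as above):

    #include <pthread.h>

    static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct node { int linked; };    /* stand-in for upd.node.leaf_p */

    static void drop_nodes(struct node **v, int n)
    {
        int updt_locked = 0;

        for (int i = 0; i < n; i++) {
            if (!v[i]->linked)
                continue;           /* common fast path: no lock taken */
            if (!updt_locked) {
                updt_locked = 1;    /* lock lazily, at most once */
                pthread_rwlock_wrlock(&updt_lock);
            }
            v[i]->linked = 0;       /* unlink under the write lock */
        }
        if (updt_locked)
            pthread_rwlock_unlock(&updt_lock);
    }
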
*/
if (!ts->upd.node.leaf_p || _HA_ATOMIC_LOAD(&ts->seen)) {
/* Time to upgrade the read lock to write lock */
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
use_wrlock = 1;
/* here we're write-locked */
*/
if (!ts->upd.node.leaf_p) {
/* Time to upgrade the read lock to write lock if needed */
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
use_wrlock = 1;
/* here we're write-locked */
/* drop the lock now */
if (use_wrlock)
- HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
}
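
The two "upgrade" hunks take the write lock only when the entry must be
(re)linked into the update tree or has already been seen by a peer; the
use_wrlock flag then lets the exit path release whichever side was taken.
POSIX rwlocks cannot upgrade in place, so the portable rendering of this
pattern is drop-read / take-write / recheck (requeue and the fields are
hypothetical stand-ins):

    #include <pthread.h>

    static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct node { int linked; int seen; };

    static void requeue(struct node *n)
    {
        pthread_rwlock_rdlock(&updt_lock);
        if (n->linked && !n->seen) {
            /* fast path: still queued and not yet consumed by a peer */
            pthread_rwlock_unlock(&updt_lock);
            return;
        }
        /* "upgrade": drop the read side, take the write side, then
         * RECHECK, since another thread may have requeued the node
         * while no lock was held */
        pthread_rwlock_unlock(&updt_lock);
        pthread_rwlock_wrlock(&updt_lock);
        if (!n->linked)
            n->linked = 1;          /* (re)insert into the update tree */
        n->seen = 0;
        pthread_rwlock_unlock(&updt_lock);
    }

With plain pthreads a single unlock call serves both sides; the HA_RWLOCK_*
wrappers keep read and write unlocks distinct per lock label, hence the
use_wrlock bookkeeping in the hunk.
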
if (decrefcnt)
if (ts->upd.node.leaf_p) {
if (!updt_locked) {
updt_locked = 1;
- HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
}
/* now we're locked, new peers can't grab it anymore,
 * existing ones already have the ref_cnt.
 */
out_unlock:
if (updt_locked)
- HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_UPDT_LOCK, &t->updt_lock);
task_exp = tick_first(task_exp, exp_next);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
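
Taken together, the hunks give the updates lock (updt_lock) its own lock
label, STK_TABLE_UPDT_LOCK, while the shard lock keeps STK_TABLE_LOCK (the
two sh_lock unlock lines above are deliberately untouched). Labels only
matter to the lock debugging/profiling build, which aggregates wait times
per label, so a dedicated label separates contention on the update tree
from contention on the hash shards. A hedged sketch of the label-side
counterpart this patch implies; the declarations below follow HAProxy's
enum lock_label / lock_label() conventions but are an assumption, not
quoted from the tree:

    /* abridged: one label per lock class, used by lock profiling */
    enum lock_label {
        STK_TABLE_LOCK,       /* shard locks: t->shards[*].sh_lock */
        STK_TABLE_UPDT_LOCK,  /* new: the update tree lock, t->updt_lock */
        /* ... other labels ... */
        LOCK_LABELS           /* must remain last */
    };

    static inline const char *lock_label(enum lock_label label)
    {
        switch (label) {
        case STK_TABLE_LOCK:      return "STK_TABLE";
        case STK_TABLE_UPDT_LOCK: return "STK_TABLE_UPDT";
        default:                  return "UNKNOWN";
        }
    }

With that in place, profiling output can attribute wait time on updt_lock
to its own line instead of folding it into the shard locks' totals.
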