lua_settable(L, -3);
hlua_stktable_entry(L, t, ts);
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return 1;
}
lua_newtable(L);
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
eb = ebmb_first(&t->keys);
for (n = eb; n; n = ebmb_next(n)) {
ts = ebmb_entry(n, struct stksess, key);
if (!ts) {
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return 1;
}
ts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
/* multi condition/value filter */
skip_entry = 0;
}
if (skip_entry) {
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--;
continue;
}
lua_newtable(L);
hlua_stktable_entry(L, t, ts);
lua_settable(L, -3);
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--;
}
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return 1;
}
new_pushed = 1;
if (!locked)
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
while (1) {
struct stksess *ts;
updateid = ts->upd.key;
ts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
ret = peer_send_updatemsg(st, appctx, ts, updateid, new_pushed, use_timed);
if (ret <= 0) {
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
break;
}
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
st->last_pushed = updateid;
out:
if (!locked)
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
return ret;
}
}
if (!(peer->flags & PEER_F_TEACH_PROCESS)) {
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
if (!(peer->flags & PEER_F_LEARN_ASSIGN) &&
(st->last_pushed != st->table->localupdate)) {
repl = peer_send_teach_process_msgs(appctx, peer, st);
if (repl <= 0) {
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
return repl;
}
}
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
}
else if (!(peer->flags & PEER_F_TEACH_FINISHED)) {
if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) {
/* Init cursors */
for (st = peer->tables; st ; st = st->next) {
st->last_get = st->last_acked = 0;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
/* if st->update appears to be in future it means
* that the last acked value is very old and we
* remain unconnected a too long time to use this
st->flags = 0;
if ((int)(st->last_pushed - st->table->commitupdate) > 0)
st->table->commitupdate = st->last_pushed;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
}
/* reset teaching and learning flags to 0 */
/* Init cursors */
for (st = peer->tables; st ; st = st->next) {
st->last_get = st->last_acked = 0;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
/* if st->update appears to be in future it means
* that the last acked value is very old and we
* remain unconnected a too long time to use this
st->flags = 0;
if ((int)(st->last_pushed - st->table->commitupdate) > 0)
st->table->commitupdate = st->last_pushed;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
}
/* Init confirm counter */
dict_entry_unref(&server_key_dict, stktable_data_cast(data, std_t_dict));
stktable_data_cast(data, std_t_dict) = NULL;
}
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
__stksess_free(t, ts);
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
}
/*
{
int ret;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
if (decrefcnt)
ts->ref_cnt--;
ret = __stksess_kill(t, ts);
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return ret;
}
{
int ret;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ret = __stktable_trash_oldest(t, to_batch);
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return ret;
}
{
struct stksess *ts;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts = __stksess_new(t, key);
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return ts;
}
{
struct stksess *ts;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts = __stktable_lookup_key(t, key);
if (ts)
ts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return ts;
}
{
struct stksess *lts;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
lts = __stktable_lookup(t, ts);
if (lts)
lts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return lts;
}
*/
/* Refresh remote-originated entry <ts> in table <t>: re-touches it while
 * keeping its current expiry (expiration arg 0, ts->expire passed through),
 * then drops one reference when <decrefcnt> is non-zero. Both steps happen
 * under the table-wide lock so the touch and the refcount drop are atomic
 * with respect to other table users.
 * NOTE(review): this hunk upgrades the table spinlock to a write lock on
 * the new rwlock — presumably so read paths can later take a shared lock;
 * confirm against the rest of the spinlock->rwlock conversion series.
 */
void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt)
{
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 0, ts->expire);
if (decrefcnt)
ts->ref_cnt--;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
}
/* Update the expiration timer for <ts> but do not touch its expiration node.
{
int expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 1, expire);
if (decrefcnt)
ts->ref_cnt--;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
}
/* Just decrease the ref_cnt of the current session. Does nothing if <ts> is NULL.
 * The decrement is performed under the table-wide lock, matching how the
 * lookup/get paths increment ref_cnt under the same lock.
 * NOTE(review): the hunk takes the rwlock in WRITE mode even though only a
 * refcount is modified — presumably ref_cnt is not yet atomic at this stage
 * of the conversion; verify against the stksess definition.
 */
static void stktable_release(struct stktable *t, struct stksess *ts)
{
if (!ts)
return;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
}
/* Insert new sticky session <ts> in the table. It is assumed that it does not
{
struct stksess *ts;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_get_entry(table, key);
if (ts)
ts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
return ts;
}
{
struct stksess *ts;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_set_entry(table, nts);
ts->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
return ts;
}
struct eb32_node *eb;
int looped = 0;
- HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);
while (1) {
/* We have found no task to expire in any tree */
t->exp_next = TICK_ETERNITY;
out_unlock:
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
return t->exp_next;
}
if (ctx->target &&
(strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
/* dump entries only if table explicitly requested */
- HA_SPIN_LOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->lock);
eb = ebmb_first(&ctx->t->keys);
if (eb) {
ctx->entry = ebmb_entry(eb, struct stksess, key);
ctx->entry->ref_cnt++;
ctx->state = STATE_DUMP;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
break;
}
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
}
}
ctx->t = ctx->t->next;
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ctx->entry->lock);
- HA_SPIN_LOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->lock);
ctx->entry->ref_cnt--;
eb = ebmb_next(&ctx->entry->key);
else if (!skip_entry && !ctx->entry->ref_cnt)
__stksess_kill(ctx->t, old);
ctx->entry->ref_cnt++;
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
break;
}
else if (!skip_entry && !ctx->entry->ref_cnt)
__stksess_kill(ctx->t, ctx->entry);
- HA_SPIN_UNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
ctx->t = ctx->t->next;
ctx->state = STATE_NEXT;