return 1;
if (!MT_LIST_ADDED(&wait->list))
- MT_LIST_ADDQ(&buffer_wq, &wait->list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list);
return 0;
}
* Returns 1 if we added the item, 0 otherwise (because it was already in a
* list).
*/
-#define MT_LIST_ADD(_lh, _el) \
+#define MT_LIST_TRY_ADD(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
* Returns 1 if we added the item, 0 otherwise (because it was already in a
* list).
*/
-#define MT_LIST_ADDQ(_lh, _el) \
+#define MT_LIST_TRY_ADDQ(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
(_ret); \
})
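The return value is what the call sites rely on to avoid double accounting. A condensed caller sketch (some_queue, elt and queued_count are hypothetical names; the macro is the one renamed above):

    /* Queue the element only if it is not already in a list: the TRY_
     * variants return 1 when the element was added and 0 otherwise. */
    if (MT_LIST_TRY_ADDQ(&some_queue, &elt->list))
        _HA_ATOMIC_ADD(&queued_count, 1);   /* account for it exactly once */

This is the same pattern as the shared_tasklet_list call site further down, which only increments tasks_run_queue when MT_LIST_TRY_ADDQ() reports that the tasklet was actually added.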
+/*
+ * Add an item at the beginning of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked.
+ */
+#define MT_LIST_ADD(_lh, _el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ while (1) { \
+ struct mt_list *n; \
+ struct mt_list *p; \
+ n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ (el)->next = n; \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ n->prev = (el); \
+ __ha_barrier_store(); \
+ p->next = (el); \
+ __ha_barrier_store(); \
+ _ret = 1; \
+ break; \
+ } \
+ (_ret); \
+ })
+
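To make the distinction concrete, here is a minimal sketch (struct foo, its list member, head and other are hypothetical; only the macros from this patch are assumed): a freshly allocated element cannot belong to any list yet, so the new unchecked MT_LIST_ADD() is enough, whereas an element that might already be queued has to go through MT_LIST_TRY_ADD().

    struct foo *item = malloc(sizeof(*item));  /* hypothetical element type with a 'list' member */
    MT_LIST_INIT(&item->list);                 /* fresh element, known to be in no list */
    MT_LIST_ADD(&head, &item->list);           /* unchecked insert at the beginning */

    /* An element that may already sit in a list must use the checked form: */
    if (!MT_LIST_TRY_ADD(&head, &other->list)) {
        /* 'other' was already in a list, nothing was added */
    }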
/*
* Add an item at the end of a list.
* It is assumed the element can't already be in a list, so it isn't checked.
*/
-#define MT_LIST_ADDQ_NOCHECK(_lh, _el) \
+#define MT_LIST_ADDQ(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
/*
* Detach a list from its head. A pointer to the first element is returned
* and the list is closed. If the list was empty, NULL is returned. This may
- * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
+ * exclusively be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ. This
* is incompatible with MT_LIST_DEL run concurrently.
* If there's at least one element, the next of the last element will always
* be NULL.
conn->idle_time = now_ms;
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
- MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
+ MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
} else {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
- MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
+ MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
}
} else {
/* this tasklet runs on a specific thread */
- if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
+ if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
/* adds list item <item> to work list <work> and wakes up the associated task */
static inline void work_list_add(struct work_list *work, struct mt_list *item)
{
- MT_LIST_ADDQ(&work->head, item);
+ MT_LIST_TRY_ADDQ(&work->head, item);
task_wakeup(work->task, TASK_WOKEN_OTHER);
}
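Because work_list_add() goes through the TRY_ variant, submitting an item that is already pending is a harmless no-op and only the wakeup is repeated. A hypothetical producer (wl and item are stand-ins):

    /* Submit 'item'; if it is already queued on wl->head, MT_LIST_TRY_ADDQ()
     * simply returns 0 and the task is woken again. */
    work_list_add(wl, &item->list);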
if (tokill_conn) {
/* We got one, put it into the concerned thread's to-kill list, and wake its kill task */
- MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
+ MT_LIST_TRY_ADDQ(&idle_conns[i].toremove_conns,
(struct mt_list *)&tokill_conn->list);
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
if (b_alloc_margin(buf, global.tune.reserved_bufs))
return 1;
- MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list);
return 0;
}
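This and the following mux call sites all follow the same buffer wait-queue shape. A condensed sketch of that pattern (ctx, its buf_wait member and ctx_buf_available are hypothetical stand-ins for the fcgi/h1/h2 contexts below):

    /* Try to get a buffer; on failure, register on the global buffer_wq so
     * the wakeup callback runs once memory is released. The TRY_ variant
     * keeps an already registered waiter from being queued twice. */
    if ((buf = b_alloc_margin(bptr, 0)) == NULL) {
        ctx->buf_wait.target    = ctx;
        ctx->buf_wait.wakeup_cb = ctx_buf_available;
        MT_LIST_TRY_ADDQ(&buffer_wq, &ctx->buf_wait.list);
    }
    return buf;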
{
HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
if (l->state == LI_READY) {
- MT_LIST_ADDQ(list, &l->wait_queue);
+ MT_LIST_TRY_ADDQ(list, &l->wait_queue);
fd_stop_recv(l->fd);
l->state = LI_LIMITED;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
fconn->buf_wait.target = fconn;
fconn->buf_wait.wakeup_cb = fcgi_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list);
}
return buf;
}
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
- MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
- MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h1c->buf_wait.target = h1c;
h1c->buf_wait.wakeup_cb = h1_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list);
}
return buf;
}
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
- MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
- MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list);
}
return buf;
}
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
- MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
- MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
leave:
if (toremove_nb != -1 && i >= toremove_nb)
break;
MT_LIST_DEL_SAFE_NOINIT(elt1);
- MT_LIST_ADDQ_NOCHECK(toremove_list, &conn->list);
+ MT_LIST_ADDQ(toremove_list, &conn->list);
i++;
}
return i;
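The hunk above is the one place where the renamed, unchecked MT_LIST_ADDQ() (formerly MT_LIST_ADDQ_NOCHECK) is the right tool: the connection has just been detached, so by construction it belongs to no list and the membership test done by the TRY_ variant would be wasted. In sketch form (elt1, conn and toremove_list as in the loop above):

    /* Move an element from one mt_list to another: detach it first, then
     * requeue it with the unchecked variant since it is known to be free. */
    MT_LIST_DEL_SAFE_NOINIT(elt1);
    MT_LIST_ADDQ(toremove_list, &conn->list);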
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
- MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
- MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}
if (b_alloc_margin(&s->res.buf, 0))
return 1;
- MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+ MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list);
return 0;
}
/* Beware: tasks that have never run don't have their ->list empty yet! */
LIST_INIT(&((struct tasklet *)t)->list);
- MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
+ MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list,
(struct mt_list *)&((struct tasklet *)t)->list);
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
* 100% due to rounding, this is not a problem. Note that while in
* theory the sum cannot be null as we cannot get there without tasklets
* to process, in practice it seldom happens when multiple writers
- * conflict and rollback on MT_LIST_ADDQ(shared_tasklet_list), causing
+ * conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing
* a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
* one above to finally fail. This is extremely rare and not a problem.
*/
case 0:
lol = malloc(sizeof(*lol));
MT_LIST_INIT(&lol->list_elt);
- MT_LIST_ADD(&pouet_list, &lol->list_elt);
+ MT_LIST_TRY_ADD(&pouet_list, &lol->list_elt);
break;
case 1:
lol = malloc(sizeof(*lol));
MT_LIST_INIT(&lol->list_elt);
- MT_LIST_ADDQ(&pouet_list, &lol->list_elt);
+ MT_LIST_TRY_ADDQ(&pouet_list, &lol->list_elt);
break;
case 2: