return 1;
if (!LIST_INLIST(&wait->list))
- LIST_APPEND(&ti->buffer_wq, &wait->list);
+ LIST_APPEND(&th_ctx->buffer_wq, &wait->list);
return 0;
}
static inline void offer_buffers(void *from, unsigned int count)
{
- if (!LIST_ISEMPTY(&ti->buffer_wq))
+ if (!LIST_ISEMPTY(&th_ctx->buffer_wq))
__offer_buffers(from, count);
}
* disabled, it contains the same info for the single running thread.
*/
struct thread_info {
-#ifdef CONFIG_HAP_POOLS
- struct list pool_lru_head; /* oldest objects */
-#endif
- struct list buffer_wq; /* buffer waiters */
- struct list streams; /* list of streams attached to this thread */
-
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] __attribute__((aligned(64)));
};

struct thread_ctx {   /* ...earlier fields omitted; the added lists below land here, not in thread_info */
uint8_t tl_class_mask; /* bit mask of non-empty tasklets classes */
// 7 bytes hole here
+#ifdef CONFIG_HAP_POOLS
+ struct list pool_lru_head; /* oldest objects */
+#endif
+ struct list buffer_wq; /* buffer waiters */
+ struct list streams; /* list of streams attached to this thread */
+
ALWAYS_ALIGN(2*sizeof(void*));
struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
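
From here on, every ti-> and ha_thread_info[thr]. access to these lists becomes th_ctx-> and ha_thread_ctx[thr]. respectively. A minimal sketch of the per-thread accessor pattern this relies on, mirroring the existing ti/ha_thread_info pair; bind_thread_ctx() is a hypothetical stand-in for the real per-thread startup code:

/* Sketch only -- not part of the patch. th_ctx is assumed to be a thread-local
 * pointer into a global per-thread array, exactly like ti/ha_thread_info.
 */
extern struct thread_ctx ha_thread_ctx[MAX_THREADS];  /* one slot per thread */
extern THREAD_LOCAL struct thread_ctx *th_ctx;        /* &ha_thread_ctx[tid] on the running thread */

static inline void bind_thread_ctx(int tid)
{
	th_ctx = &ha_thread_ctx[tid];
}
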
unlikely((buf = b_alloc(bptr)) == NULL)) {
check->buf_wait.target = check;
check->buf_wait.wakeup_cb = check_buf_available;
- LIST_APPEND(&ti->buffer_wq, &check->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
}
return buf;
}
return 0;
for (thr = 0; thr < MAX_THREADS; thr++)
- LIST_INIT(&ha_thread_info[thr].buffer_wq);
+ LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
/* The reserved buffer is what we leave behind us. Thus we always need
* other tasks, but that's a rough estimate. Similarly, for each cached
* event we'll need 1 buffer.
*/
- list_for_each_entry_safe(wait, wait_back, &ti->buffer_wq, list) {
+ list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
if (!count)
break;
if (b_alloc(buf))
return 1;
- LIST_APPEND(&ti->buffer_wq, &buffer_wait->list);
+ LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
return 0;
}
unlikely((buf = b_alloc(&h3_uqs->qcs->tx.buf)) == NULL)) {
h3->buf_wait.target = h3_uqs;
h3->buf_wait.wakeup_cb = qcs_buf_available;
- LIST_APPEND(&ti->buffer_wq, &h3->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &h3->buf_wait.list);
}
return buf;
unlikely((buf = b_alloc(bptr)) == NULL)) {
fconn->buf_wait.target = fconn;
fconn->buf_wait.wakeup_cb = fcgi_buf_available;
- LIST_APPEND(&ti->buffer_wq, &fconn->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
}
return buf;
}
unlikely((buf = b_alloc(bptr)) == NULL)) {
h1c->buf_wait.target = h1c;
h1c->buf_wait.wakeup_cb = h1_buf_available;
- LIST_APPEND(&ti->buffer_wq, &h1c->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
}
return buf;
}
unlikely((buf = b_alloc(bptr)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
- LIST_APPEND(&ti->buffer_wq, &h2c->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
}
return buf;
}
unlikely((buf = b_alloc(bptr)) == NULL)) {
qcc->buf_wait.target = qcc;
qcc->buf_wait.wakeup_cb = qc_buf_available;
- LIST_APPEND(&ti->buffer_wq, &qcc->buf_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &qcc->buf_wait.list);
}
return buf;
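
The buffer allocation hunks above all register a waiter the same way: when b_alloc() fails, the caller fills a buffer_wait entry (target plus wakeup_cb) and appends it to the current thread's buffer_wq. The consumer side is the __offer_buffers() loop whose header was changed earlier; below is a rough sketch of how that round trip is expected to complete. The skip of the caller and the wakeup_cb return convention are assumptions; only the list being walked comes from the patch.

/* Rough sketch of __offer_buffers(): wake up to 'count' waiters parked on the
 * current thread's buffer_wq. The skip/accounting details are assumptions.
 */
void __offer_buffers(void *from, unsigned int count)
{
	struct buffer_wait *wait, *wait_back;

	list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
		if (!count)
			break;

		/* don't wake the releaser itself; wakeup_cb() is assumed to
		 * return non-zero once the waiter made progress and can be
		 * unqueued.
		 */
		if (wait->target == from || !wait->wakeup_cb(wait->target))
			continue;

		LIST_DEL_INIT(&wait->list);
		count--;
	}
}
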
struct pool_head *pool;
do {
- item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
+ item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
/* note: by definition we remove oldest objects so they also are the
* oldest in their own pools, thus their next is the pool's head.
*/
struct pool_cache_head *ph = &pool->cache[tid];
LIST_INSERT(&ph->list, &item->by_pool);
- LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
+ LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
ph->count++;
pool_cache_count++;
pool_cache_bytes += pool->size;
int thr;
for (thr = 0; thr < MAX_THREADS; thr++) {
- LIST_INIT(&ha_thread_info[thr].pool_lru_head);
+ LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
}
#endif
detect_allocator();
thread_isolate();
for (thr = 0; thr < global.nbthread; thr++) {
- list_for_each_entry(s, &ha_thread_info[thr].streams, list) {
+ list_for_each_entry(s, &ha_thread_ctx[thr].streams, list) {
stream_shutdown(s, SF_ERR_KILLED);
}
}
s->tunnel_timeout = TICK_ETERNITY;
- LIST_APPEND(&ti->streams, &s->list);
+ LIST_APPEND(&th_ctx->streams, &s->list);
if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
goto out_fail_accept;
* only touch their node under thread isolation.
*/
LIST_DEL_INIT(&bref->users);
- if (s->list.n != &ti->streams)
+ if (s->list.n != &th_ctx->streams)
LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
bref->ref = s->list.n;
__ha_barrier_store();
if (b_alloc(&s->res.buf))
return 1;
- LIST_APPEND(&ti->buffer_wq, &s->buffer_wait.list);
+ LIST_APPEND(&th_ctx->buffer_wq, &s->buffer_wait.list);
return 0;
}
int thr;
for (thr = 0; thr < MAX_THREADS; thr++)
- LIST_INIT(&ha_thread_info[thr].streams);
+ LIST_INIT(&ha_thread_ctx[thr].streams);
}
INITCALL0(STG_INIT, init_stream);
* pointer points back to the head of the streams list.
*/
LIST_INIT(&appctx->ctx.sess.bref.users);
- appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+ appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
appctx->st2 = STAT_ST_LIST;
/* fall through */
struct stream *curr_strm;
int done= 0;
- if (appctx->ctx.sess.bref.ref == &ha_thread_info[appctx->ctx.sess.thr].streams)
+ if (appctx->ctx.sess.bref.ref == &ha_thread_ctx[appctx->ctx.sess.thr].streams)
done = 1;
else {
/* check if we've found a stream created after issuing the "show sess" */
appctx->ctx.sess.thr++;
if (appctx->ctx.sess.thr >= global.nbthread)
break;
- appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+ appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
continue;
}
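
These "show sess" hunks resume their walk of ha_thread_ctx[thr].streams through a back-reference bookmark, and the earlier LIST_DEL_INIT(&bref->users) hunk is what advances that bookmark when the stream it points to is released. For reference, the assumed shape of the bookmark, consistent with the fields used above but not itself part of the patch:

/* Assumed layout (not in this patch): 'ref' is the resume cursor into one
 * per-thread streams list (or the list head once the end is reached), and
 * 'users' links the bookmark into the watched stream's back_refs so the
 * release path can move it forward before the stream disappears.
 */
struct bref {
	struct list users;  /* attached to the watched stream's back_refs */
	struct list *ref;   /* next stream's 'list' node, or the list head */
};
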
/* first, look for the requested stream in the stream table */
for (thr = 0; !strm && thr < global.nbthread; thr++) {
- list_for_each_entry(strm, &ha_thread_info[thr].streams, list) {
+ list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
if (strm == ptr) {
stream_shutdown(strm, SF_ERR_KILLED);
break;