return 1;
if (!MT_LIST_ADDED(&wait->list))
- MT_LIST_ADDQ(&buffer_wq, &wait->list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &wait->list);
return 0;
}
#include <haproxy/pool.h>
extern struct pool_head *pool_head_buffer;
-extern struct mt_list buffer_wq;
int init_buffer();
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
* passing a buffer to oneself in case of failed allocations (e.g. need two
* buffers, get one, fail, release it and wake up self again). In case of
* normal buffer release where it is expected that the caller is not waiting
- * for a buffer, NULL is fine.
+ * for a buffer, NULL is fine. It will wake waiters on the current thread only.
*/
void __offer_buffer(void *from, unsigned int threshold);
static inline void offer_buffers(void *from, unsigned int threshold)
{
- if (!MT_LIST_ISEMPTY(&buffer_wq))
+ if (!MT_LIST_ISEMPTY(&ti->buffer_wq))
__offer_buffer(from, threshold);
}
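For reference, the allocation-side pattern repeated by the hunks below (checks, FCGI, H1, H2, streams) boils down to the following sketch. The owner struct my_ctx, its buf_wait member and the my_buf_available() callback are hypothetical placeholders; the calls themselves (b_alloc_margin(), MT_LIST_ADDED(), MT_LIST_ADDQ() on ti->buffer_wq) are the ones this patch uses.

/* Hypothetical allocation sketch (not part of this patch): try to get a
 * buffer; if the pool is exhausted, register the owner on the current
 * thread's wait queue so __offer_buffer() can wake it up later from the
 * same thread.
 */
static struct buffer *my_get_buf(struct my_ctx *ctx, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(!MT_LIST_ADDED(&ctx->buf_wait.list)) &&
	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
		ctx->buf_wait.target    = ctx;
		ctx->buf_wait.wakeup_cb = my_buf_available; /* hypothetical callback */
		MT_LIST_ADDQ(&ti->buffer_wq, &ctx->buf_wait.list);
	}
	return buf;
}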
#ifdef CONFIG_HAP_LOCAL_POOLS
struct list pool_lru_head; /* oldest objects */
#endif
+ struct mt_list buffer_wq; /* buffer waiters */
+
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] __attribute__((aligned(64)));
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
check->buf_wait.target = check;
check->buf_wait.wakeup_cb = check_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &check->buf_wait.list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
}
return buf;
}
struct pool_head *pool_head_buffer;
-/* list of objects waiting for at least one buffer */
-struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);
-
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer()
{
void *buffer;
+ int thr;
pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
if (!pool_head_buffer)
return 0;
+ for (thr = 0; thr < MAX_THREADS; thr++)
+ MT_LIST_INIT(&ha_thread_info[thr].buffer_wq);
+
/* The reserved buffer is what we leave behind us. Thus we always need
* at least one extra buffer in minavail otherwise we'll end up waking
* up tasks with no memory available, causing a lot of useless wakeups.
*/
avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
- mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
+ mt_list_for_each_entry_safe(wait, &ti->buffer_wq, list, elt1, elt2) {
if (avail <= threshold)
break;
if (b_alloc_margin(buf, global.tune.reserved_bufs))
return 1;
- MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
return 0;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
fconn->buf_wait.target = fconn;
fconn->buf_wait.wakeup_cb = fcgi_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
}
return buf;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h1c->buf_wait.target = h1c;
h1c->buf_wait.wakeup_cb = h1_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
}
return buf;
}
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
- MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
}
return buf;
}
if (b_alloc_margin(&s->res.buf, 0))
return 1;
- MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+ MT_LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
return 0;
}
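On the release side the counterpart is symmetrical: once a buffer goes back to the pool, the owner offers it to waiters queued on its own thread. A minimal sketch, again with the hypothetical my_ctx owner; offer_buffers() and its <from> argument behave as documented above, and the threshold value of 1 is only illustrative.

/* Hypothetical release sketch (not part of this patch): free the buffer,
 * then wake waiters on the current thread's buffer_wq. Passing <ctx> as
 * <from> lets __offer_buffer() skip waking the caller itself; NULL is
 * fine when the caller is not waiting for a buffer.
 */
static void my_release_buf(struct my_ctx *ctx, struct buffer *bptr)
{
	b_free(bptr);
	offer_buffers(ctx, 1); /* illustrative threshold */
}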