- tune.buffers.limit
- tune.buffers.reserve
- tune.bufsize
+ - tune.bufsize.large
- tune.bufsize.small
- tune.comp.maxlevel
- tune.defaults.purge
value set using this parameter will automatically be rounded up to the next
multiple of 8 on 32-bit machines and 16 on 64-bit machines.
+tune.bufsize.large <size>
+  Sets the size in bytes for large buffers. By default, support for large
+  buffers is not enabled; it must explicitly be enabled by setting this value.
+
+ These buffers are designed to be used in some specific contexts where more
+ data must be bufferized without changing the size of regular buffers. The
+ large buffers are not implicitly used.
+
tune.bufsize.small <size>
Sets the size in bytes for small buffers. The defaults value is 1024.
#include <haproxy/pool.h>
extern struct pool_head *pool_head_buffer;
+extern struct pool_head *pool_head_large_buffer;
int init_buffer(void);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
#define __b_free(_buf) \
do { \
char *area = (_buf)->area; \
+ size_t sz = (_buf)->size; \
\
/* let's first clear the area to save an occasional "show sess all" \
* glancing over our shoulder from getting a dangling pointer. \
*/ \
*(_buf) = BUF_NULL; \
__ha_barrier_store(); \
- if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
+ /* if enabled, large buffers are always strictly greater \
+ * than the default buffers */ \
+ if (unlikely(pool_head_large_buffer && sz == pool_head_large_buffer->size)) \
+ pool_free(pool_head_large_buffer, area); \
+ else if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
else \
pool_free(pool_head_buffer, area); \
uint recv_enough; /* how many input bytes at once are "enough" */
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
uint bufsize_small;/* small buffer size in bytes */
+ uint bufsize_large;/* large buffer size in bytes */
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
int reserved_bufs; /* how many buffers can only be allocated for response */
int buf_limit; /* if not null, how many total buffers may only be allocated */
global.nbthread = global.thread_limit;
}
+ if (global.tune.bufsize_large > 0) {
+ if (global.tune.bufsize_large == global.tune.bufsize)
+ global.tune.bufsize_large = 0;
+ else if (global.tune.bufsize_large < global.tune.bufsize) {
+ ha_warning("tune.bufsize.large (%u) is lower than tune.bufsize (%u). large buffers support is disabled. "
+ "Please fix either value to remove this warning.\n",
+ global.tune.bufsize_large, global.tune.bufsize);
+ global.tune.bufsize_large = 0;
+ }
+ }
+
/* in the worst case these were supposed to be set in thread_detect_count() */
BUG_ON(!global.nbthread);
BUG_ON(!global.nbtgroups);
#include <haproxy/tools.h>
struct pool_head *pool_head_buffer __read_mostly;
+struct pool_head *pool_head_large_buffer __read_mostly = NULL;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer()
if (!pool_head_buffer)
return 0;
+ if (global.tune.bufsize_large) {
+ pool_head_large_buffer = create_aligned_pool("large_buffer", global.tune.bufsize_large, 64, MEM_F_SHARED|MEM_F_EXACT);
+ if (!pool_head_large_buffer)
+ return 0;
+ }
+
/* make sure any change to the queues assignment isn't overlooked */
BUG_ON(DB_PERMANENT - DB_UNLIKELY - 1 != DYNBUF_NBQ);
BUG_ON(DB_MUX_RX_Q < DB_SE_RX_Q || DB_MUX_RX_Q >= DYNBUF_NBQ);
return 0;
}
+/* config parse for global "tune.bufsize.large". Expects exactly one strictly
+ * positive size argument (parse_size_err units accepted) and stores it into
+ * global.tune.bufsize_large. Returns 0 on success, -1 with *err set otherwise.
+ */
+static int cfg_parse_tune_bufsize_large(char **args, int section_type,
+                                        struct proxy *curpx, const struct proxy *defpx,
+                                        const char *file, int line, char **err)
+{
+	const char *res;
+	uint size;
+
+	if (too_many_args(1, args, err, NULL))
+		goto err;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "'%s' expects an integer argument.", args[0]);
+		goto err;
+	}
+
+	res = parse_size_err(args[1], &size);
+	if (res != NULL) {
+		memprintf(err, "unexpected '%s' after size passed to '%s'", res, args[0]);
+		goto err;
+	}
+
+	/* size is unsigned: only zero needs to be rejected here */
+	if (size == 0) {
+		memprintf(err, "'%s' expects a positive integer argument.", args[0]);
+		goto err;
+	}
+
+	global.tune.bufsize_large = size;
+	return 0;
+
+ err:
+	return -1;
+}
+
/* config parse for global "tune.bufsize.small" */
static int cfg_parse_tune_bufsize_small(char **args, int section_type,
struct proxy *curpx, const struct proxy *defpx,
static struct cfg_kw_list cfg_kws = {ILH, {
{ CFG_GLOBAL, "tune.buffers.limit", cfg_parse_tune_buffers_limit },
{ CFG_GLOBAL, "tune.buffers.reserve", cfg_parse_tune_buffers_reserve },
+ { CFG_GLOBAL, "tune.bufsize.large", cfg_parse_tune_bufsize_large },
{ CFG_GLOBAL, "tune.bufsize.small", cfg_parse_tune_bufsize_small },
{ 0, NULL, NULL }
}};
.options = GTUNE_LISTENER_MQ_OPT,
.bufsize = (BUFSIZE + 2*sizeof(void *) - 1) & -(2*sizeof(void *)),
.bufsize_small = BUFSIZE_SMALL,
+ .bufsize_large = 0,
.maxrewrite = MAXREWRITE,
.reserved_bufs = RESERVED_BUFS,
.pattern_cache = DEFAULT_PAT_LRU_SIZE,
/* We may want to free the maximum amount of pools if the proxy is stopping */
if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
pool_flush(pool_head_buffer);
+ pool_flush(pool_head_large_buffer);
pool_flush(pool_head_http_txn);
pool_flush(pool_head_requri);
pool_flush(pool_head_capture);