This patch introduces "minsize-req" and "minsize-res".
These two options allow you to set the minimum payload size required for
compression to be applied.
This helps save CPU on both the server and client sides when the payload
is too small to benefit from compression.
* The response contains a "Content-Encoding" header, indicating that the
response is already compressed (see compression offload)
* The response contains an invalid "ETag" header or multiple ETag headers
+ * The payload size is smaller than the minimum size
+ (see compression minsize-res)
Note: The compression does not emit the Warning header.
compression algo gzip
compression type text/html text/plain
- See also : "compression offload", "compression direction"
+ See also : "compression offload", "compression direction",
+ "compression minsize-req" and "compression minsize-res"
+
+compression minsize-req <size>
+compression minsize-res <size>
+ Sets the minimum payload size in bytes for compression to be applied.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Payloads smaller than this size will not be compressed, avoiding unnecessary
+ CPU overhead for data that would not significantly benefit from compression.
+ "minsize-req" applies on requests and "minsize-res" on responses.
+ The default value is 0, which means that no minimum size is enforced.
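
Since the option is also accepted in a "defaults" section (and propagated to
proxies that inherit from it, as the last hunk of this patch shows), a single
threshold can be shared across backends; a sketch with illustrative names:

    defaults
        mode http
        compression algo gzip
        compression type text/html text/plain
        compression minsize-res 1k

    backend app
        # inherits the compression settings above, including minsize-res
        server app1 192.0.2.10:8080
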
compression offload
Makes HAProxy work as a compression offloader only.
struct comp_algo *algo_req; /* Algo to use for request */
struct comp_type *types_req; /* Types to be compressed for requests */
struct comp_type *types_res; /* Types to be compressed for responses */
+ unsigned int minsize_res; /* Min response body size to be compressed */
+ unsigned int minsize_req; /* Min request body size to be compressed */
unsigned int flags;
};
--- /dev/null
+varnishtest "Compression ignores small payloads"
+
+#REQUIRE_OPTIONS=ZLIB|SLZ
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == "/response-lower"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"123\"" \
+ -bodylen 50
+
+ rxreq
+ expect req.url == "/response-equal"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"123\"" \
+ -bodylen 1024
+
+ rxreq
+ expect req.url == "/response-greater"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"123\"" \
+ -bodylen 2000
+
+ rxreq
+ expect req.url == "/request-lower"
+ expect req.http.content-encoding == "<undef>"
+ expect req.method == "POST"
+ expect req.bodylen == 50
+ txresp
+
+ rxreq
+ expect req.url == "/request-equal"
+ expect req.http.content-encoding == "gzip"
+ expect req.method == "POST"
+ gunzip
+ expect req.bodylen == 800
+ txresp
+
+ rxreq
+ expect req.url == "/request-greater"
+ expect req.http.content-encoding == "gzip"
+ expect req.method == "POST"
+ gunzip
+ expect req.bodylen == 2000
+ txresp
+} -start
+
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe-gzip
+ bind "fd@${fe_gzip}"
+ default_backend be-gzip
+
+ backend be-gzip
+ compression direction both
+
+ compression algo-res gzip
+ compression type-res text/plain
+ compression minsize-res 1k
+
+ compression algo-req gzip
+ compression type-req text/plain
+ compression minsize-req 800
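+ # Note: the "-equal" payloads (1024 and 800 bytes) match these thresholds
+ # exactly and are still expected to be compressed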
+
+ server www ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_gzip_sock} {
+ txreq -url "/response-lower" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "\"123\""
+ expect resp.bodylen == 50
+
+ txreq -url "/response-equal" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"123\""
+ gunzip
+ expect resp.bodylen == 1024
+
+ txreq -url "/response-greater" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"123\""
+ gunzip
+ expect resp.bodylen == 2000
+
+ txreq -method POST \
+ -url "/request-lower" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 50
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST \
+ -url "/request-equal" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 800
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST \
+ -url "/request-greater" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 2000
+ rxresp
+ expect resp.status == 200
+} -run
struct http_txn *txn = s->txn;
struct http_hdr_ctx ctx;
struct comp_type *comp_type;
+ unsigned int comp_minsize = 0;
+ int32_t pos;
+ unsigned long long len = 0;
ctx.blk = NULL;
/* Already compressed, don't bother */
return;
comp_type = NULL;
+ /* compress only if the body size is greater than or equal to the minimum size */
+ if ((s->be->comp && (comp_minsize = s->be->comp->minsize_req)) ||
+ (strm_fe(s)->comp && (comp_minsize = strm_fe(s)->comp->minsize_req))) {
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA)
+ len += htx_get_blksz(blk);
+ }
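+ /* htx->extra, when known, holds the announced payload length not yet received, so count it too */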
+ if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+ len += htx->extra;
+ /* small requests should not be compressed */
+ if (len < comp_minsize)
+ goto fail;
+ }
+
/*
* We don't want to compress content-types not listed in the "compression type" directive if any. If no content-type was found but configuration
* requires one, we don't compress either. Backend has the priority.
struct http_txn *txn = s->txn;
struct http_hdr_ctx ctx;
struct comp_type *comp_type;
+ unsigned int comp_minsize = 0;
+ int32_t pos;
+ unsigned long long len = 0;
/* no common compression algorithm was found in request header */
if (st->comp_algo[COMP_DIR_RES] == NULL)
if (!(msg->flags & HTTP_MSGF_XFER_LEN) || msg->flags & HTTP_MSGF_BODYLESS)
goto fail;
+ /* compress only if the body size is greater than or equal to the minimum size */
+ if ((s->be->comp && (comp_minsize = s->be->comp->minsize_res)) ||
+ (strm_fe(s)->comp && (comp_minsize = strm_fe(s)->comp->minsize_res))) {
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA)
+ len += htx_get_blksz(blk);
+ }
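+ /* as above, htx->extra accounts for announced payload bytes not yet received */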
+ if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+ len += htx->extra;
+ /* small responses should not be compressed */
+ if (len < comp_minsize)
+ goto fail;
+ }
+
/* content is already compressed */
ctx.blk = NULL;
if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
{
struct comp *comp;
int ret = 0;
+ const char *res;
if (proxy->comp == NULL) {
comp = calloc(1, sizeof(*comp));
continue;
}
}
+ else if (strcmp(args[1], "minsize-req") == 0) {
+ if (*(args[2]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[1]);
+ ret = -1;
+ goto end;
+ }
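+ /* parse_size_err() also accepts unit suffixes such as "1k" */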
+ res = parse_size_err(args[2], &comp->minsize_req);
+ if (res != NULL) {
+ memprintf(err, "unexpected '%s' after size passed to '%s'", res, args[1]);
+ ret = -1;
+ goto end;
+ }
+ }
+ else if (strcmp(args[1], "minsize-res") == 0) {
+ if (*(args[2]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[1]);
+ ret = -1;
+ goto end;
+ }
+ res = parse_size_err(args[2], &comp->minsize_res);
+ if (res != NULL) {
+ memprintf(err, "unexpected '%s' after size passed to '%s'", res, args[1]);
+ ret = -1;
+ goto end;
+ }
+ }
else if (strcmp(args[1], "direction") == 0) {
if (!args[2]) {
memprintf(err, "'%s' expects 'request', 'response', or 'both'.", args[0]);
}
}
else {
- memprintf(err, "'%s' expects 'algo', 'type' 'direction' or 'offload'",
+ memprintf(err, "'%s' expects 'algo', 'type', 'direction', 'offload', 'minsize-req' or 'minsize-res'.",
args[0]);
ret = -1;
goto end;
curproxy->comp->algo_req = defproxy->comp->algo_req;
curproxy->comp->types_res = defproxy->comp->types_res;
curproxy->comp->types_req = defproxy->comp->types_req;
+ curproxy->comp->minsize_res = defproxy->comp->minsize_res;
+ curproxy->comp->minsize_req = defproxy->comp->minsize_req;
curproxy->comp->flags = defproxy->comp->flags;
}