From: rousskov <> Date: Sun, 8 Mar 1998 06:42:54 +0000 (+0000) Subject: - Added memory pools for variable size objects (strings). X-Git-Tag: SQUID_3_0_PRE1~3882 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=02922e76c2200b020601f51d8ae6f0f2819f1090;p=thirdparty%2Fsquid.git - Added memory pools for variable size objects (strings). There are three pools; for small, medium, and large objects. - Extended String object to use memory pools. Most fixed size char array fields will be replaced using string pools. Same for most malloc()-ed buffers. - Replaced "char *urlpath" in request_t with "String urlpath". - Fixed some printf-like function bugs. --- diff --git a/ChangeLog b/ChangeLog index f02b9d6772..5d74273697 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + - Added memory pools for variable size objects (strings). + There are three pools; for small, medium, and large objects. + - Extended String object to use memory pools. Most fixed size char + array fields will be replaced using string pools. Same for most + malloc()-ed buffers. - Fixed ICP bug when we send queries, but expect zero replies. - Fixed alignment/casting bugs for ICP messages. @@ -10,7 +15,7 @@ Changes to squid-1.2.beta16 (Mar 4, 1998): - - Added Portugese erorr messages from Pedro Lineu Orso + - Added Portuguese error messages from Pedro Lineu Orso - Added a simple but very effective hack to cachemgr.cgi that tries to interpret lines with '\t' as table records and formats them accordingly. With a few exceptions (see source code), first line diff --git a/TODO b/TODO index 6410ce0267..8c1ee3c194 100644 --- a/TODO +++ b/TODO @@ -59,6 +59,7 @@ Our Todo List (4) ** FTP PUT (KA) (4) ** Everywhere that we use 'pattern' or such, use ACL elements instead. stoplist_pattern, refresh_pattern, ... (DW) +(4) ** Double check that MemBuf.size is used correctly everywhere (AR) (4) Refresh based on content types. This means we'll need an enum of known content types added to StoreEntry. Unknown types will lose. 
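The three pools mentioned above hold small, medium, and large variable-size objects; the protos.h hunk further down only adds the memAllocBuf()/memFreeBuf() prototypes, and the pool implementation itself (the new String.o plus the mem.c changes) is not shown in this patch. The following is a minimal sketch of the size-class idea those prototypes suggest: the class sizes, the demo* names, and the calloc/free fallback are assumptions for illustration, not the actual pool code.

#include <stdio.h>
#include <stdlib.h>

/* assumed size classes; the real thresholds live in the pool code,
 * which is outside this patch */
#define STR_POOL_SMALL_SZ   36
#define STR_POOL_MEDIUM_SZ  128
#define STR_POOL_LARGE_SZ   512

/* round net_size up to the smallest class that fits and report the
 * gross (allocated) size back to the caller, as the
 * memAllocBuf(net_size, &gross_size) prototype implies */
static void *
demoAllocBuf(size_t net_size, size_t *gross_size)
{
    if (net_size <= STR_POOL_SMALL_SZ)
	*gross_size = STR_POOL_SMALL_SZ;
    else if (net_size <= STR_POOL_MEDIUM_SZ)
	*gross_size = STR_POOL_MEDIUM_SZ;
    else if (net_size <= STR_POOL_LARGE_SZ)
	*gross_size = STR_POOL_LARGE_SZ;
    else
	*gross_size = net_size;	/* oversized: no pool, plain allocation */
    /* a real pool would pop a chunk off the matching pool's free stack;
     * calloc stands in for that here */
    return calloc(1, *gross_size);
}

/* return a buffer; the gross size tells which pool it came from */
static void
demoFreeBuf(size_t gross_size, void *buf)
{
    (void) gross_size;		/* a real pool would select by size */
    free(buf);
}

int
main(void)
{
    size_t gross = 0;
    char *buf = demoAllocBuf(100, &gross);	/* lands in the medium class */
    printf("requested 100, got %u bytes\n", (unsigned) gross);
    demoFreeBuf(gross, buf);
    return 0;
}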
diff --git a/include/util.h b/include/util.h index 8c5905d9b5..4a9a14d3fe 100644 --- a/include/util.h +++ b/include/util.h @@ -1,5 +1,5 @@ /* - * $Id: util.h,v 1.43 1998/03/03 00:30:56 rousskov Exp $ + * $Id: util.h,v 1.44 1998/03/07 23:42:55 rousskov Exp $ * * AUTHOR: Harvest Derived * @@ -178,6 +178,7 @@ extern time_t parse_iso3307_time(const char *buf); extern char *base64_decode(const char *coded); extern const char *base64_encode(const char *decoded); +#if 0 /* trying new "pool"ed strings */ typedef struct _String { char *buf; off_t off; @@ -189,8 +190,10 @@ extern String *stringCreate(size_t); extern void stringAppend(String *, const char *, size_t); extern void stringFree(String *); #define stringLength(S) (S)->off +#endif extern double xpercent(double part, double whole); +extern int xpercentInt(double part, double whole); extern double xdiv(double nom, double denom); diff --git a/lib/util.c b/lib/util.c index e1c2490e7b..851367bc36 100644 --- a/lib/util.c +++ b/lib/util.c @@ -1,6 +1,6 @@ /* - * $Id: util.c,v 1.54 1998/03/06 01:33:12 wessels Exp $ + * $Id: util.c,v 1.55 1998/03/07 23:42:56 rousskov Exp $ * * DEBUG: * AUTHOR: Harvest Derived @@ -130,6 +130,9 @@ #if HAVE_ERRNO_H #include #endif +#if HAVE_MATH_H +#include +#endif #include "util.h" #include "snprintf.h" @@ -742,6 +745,13 @@ xpercent(double part, double whole) return xdiv(100 * part, whole); } +int +xpercentInt(double part, double whole) +{ + return (int)rint(xpercent(part, whole)); +} + + /* somewhat safer division */ double xdiv(double nom, double denom) diff --git a/src/HttpHdrCc.cc b/src/HttpHdrCc.cc index 5181f37962..5c3456708a 100644 --- a/src/HttpHdrCc.cc +++ b/src/HttpHdrCc.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrCc.cc,v 1.3 1998/03/05 20:55:55 rousskov Exp $ + * $Id: HttpHdrCc.cc,v 1.4 1998/03/07 23:42:56 rousskov Exp $ * * DEBUG: section 65 HTTP Cache Control Header * AUTHOR: Alex Rousskov @@ -140,7 +140,7 @@ httpHdrCcDestroy(HttpHdrCc * cc) } HttpHdrCc * -httpHdrCcDup(HttpHdrCc * cc) +httpHdrCcDup(const HttpHdrCc * cc) { HttpHdrCc *dup; assert(cc); @@ -151,13 +151,13 @@ httpHdrCcDup(HttpHdrCc * cc) } void -httpHdrCcPackValueInto(HttpHdrCc * cc, Packer * p) +httpHdrCcPackValueInto(const HttpHdrCc * cc, Packer * p) { http_hdr_cc_type flag; int pcount = 0; assert(cc && p); if (cc->max_age >= 0) { - packerPrintf(p, "max-age=%d", cc->max_age); + packerPrintf(p, "max-age=%d", (int)cc->max_age); pcount++; } for (flag = 0; flag < CC_ENUM_END; flag++) { @@ -169,7 +169,7 @@ httpHdrCcPackValueInto(HttpHdrCc * cc, Packer * p) } void -httpHdrCcJoinWith(HttpHdrCc * cc, HttpHdrCc * new_cc) +httpHdrCcJoinWith(HttpHdrCc * cc, const HttpHdrCc * new_cc) { assert(cc && new_cc); if (cc->max_age < 0) diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc index 36a9054728..af6a9fc436 100644 --- a/src/HttpHdrRange.cc +++ b/src/HttpHdrRange.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrRange.cc,v 1.1 1998/03/05 20:55:55 rousskov Exp $ + * $Id: HttpHdrRange.cc,v 1.2 1998/03/07 23:42:57 rousskov Exp $ * * DEBUG: section 64 HTTP Content-Range Header * AUTHOR: Alex Rousskov @@ -58,6 +58,7 @@ #define known_spec(s) ((s) != range_spec_unknown) #define size_min(a,b) ((a) <= (b) ? (a) : (b)) #define size_diff(a,b) ((a) >= (b) ? 
((a)-(b)) : 0) +static HttpHdrRangeSpec *httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec); static int httpHdrRangeSpecCanonize(HttpHdrRangeSpec *spec, size_t clen); /* globals */ @@ -68,11 +69,16 @@ static int RangeParsedCount = 0; */ +static HttpHdrRangeSpec * +httpHdrRangeSpecCreate() +{ + return memAllocate(MEM_HTTP_HDR_RANGE_SPEC); +} + /* parses range-spec and returns new object on success */ static HttpHdrRangeSpec * httpHdrRangeSpecParseCreate(const char *field, int flen) { - HttpHdrRangeSpec *sp = NULL; HttpHdrRangeSpec spec = { range_spec_unknown, range_spec_unknown }; const char *p; if (flen < 2) @@ -103,9 +109,7 @@ httpHdrRangeSpecParseCreate(const char *field, int flen) debug(64, 2) ("ignoring invalid range-spec near: '%s'\n", field); return NULL; } - sp = memAllocate(MEM_HTTP_HDR_RANGE_SPEC); - *sp = spec; - return sp; + return httpHdrRangeSpecDup(&spec); } static void @@ -114,6 +118,16 @@ httpHdrRangeSpecDestroy(HttpHdrRangeSpec *spec) memFree(MEM_HTTP_HDR_RANGE_SPEC, spec); } + +static HttpHdrRangeSpec * +httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec) +{ + HttpHdrRangeSpec *dup = httpHdrRangeSpecCreate(); + dup->offset = spec->offset; + dup->length = spec->length; + return dup; +} + /* fills "absent" positions in range specification based on response body size returns true if the range is still valid range is valid if its intersection with [0,length-1] is not empty @@ -189,6 +203,43 @@ httpHdrRangeDestroy(HttpHdrRange *range) memFree(MEM_HTTP_HDR_RANGE, range); } +HttpHdrRange * +httpHdrRangeDup(const HttpHdrRange * range) +{ + HttpHdrRange *dup; + int i; + assert(range); + dup = httpHdrRangeCreate(); + stackPrePush(&dup->specs, range->specs.count); + for (i = 0; i < range->specs.count; i++) + stackPush(&dup->specs, httpHdrRangeSpecDup(range->specs.items[i])); + assert(range->specs.count == dup->specs.count); + return dup; +} + +void +httpHdrRangePackValueInto(const HttpHdrRange * range, Packer * p) +{ + HttpHdrRangePos pos = HttpHdrRangeInitPos; + HttpHdrRangeSpec spec; + assert(range); + while (httpHdrRangeGetSpec(range, &spec, &pos)) { + packerPrintf(p, (pos == HttpHdrRangeInitPos) ? 
"%d-%d" : ",%d-%d", + spec.offset, spec.offset+spec.length-1); + } +} + +void +httpHdrRangeJoinWith(HttpHdrRange * range, const HttpHdrRange * new_range) +{ + HttpHdrRangePos pos = HttpHdrRangeInitPos; + HttpHdrRangeSpec spec; + assert(range && new_range); + stackPrePush(&range->specs, new_range->specs.count); + while (httpHdrRangeGetSpec(range, &spec, &pos)) + stackPush(&range->specs, httpHdrRangeSpecDup(&spec)); +} + /* * canonizes all range specs within a set preserving the order * returns true if the set is valid after canonization; @@ -207,19 +258,16 @@ httpHdrRangeCanonize(HttpHdrRange *range, size_t clen) return range->specs.count; } -/* searches for next (unseen) range, returns true if found */ +/* searches for next range, returns true if found */ int -httpHdrRangeGetNext(const HttpHdrRange *range, HttpHdrRangeSpec *spec, size_t seen_len) +httpHdrRangeGetSpec(const HttpHdrRange *range, HttpHdrRangeSpec *spec, int *pos) { - int i; assert(range && spec); - /* simple linear search */ - for (i = 0; i < range->specs.count; i++) { - *spec = *(HttpHdrRangeSpec*)range->specs.items[i]; - if (spec->offset < seen_len) { - assert(spec->offset + spec->length <= seen_len); - } else - return 1; + assert(pos && *pos >= -1 && *pos < range->specs.count); + (*pos)++; + if (*pos < range->specs.count) { + *spec = *(HttpHdrRangeSpec*)range->specs.items[*pos]; + return 1; } spec->offset = spec->length = 0; return 0; diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc index 15717ff69c..469b952f76 100644 --- a/src/HttpHeader.cc +++ b/src/HttpHeader.cc @@ -1,5 +1,5 @@ /* - * $Id: HttpHeader.cc,v 1.17 1998/03/05 20:55:56 rousskov Exp $ + * $Id: HttpHeader.cc,v 1.18 1998/03/07 23:42:58 rousskov Exp $ * * DEBUG: section 55 HTTP Header * AUTHOR: Alex Rousskov @@ -100,7 +100,7 @@ static field_attrs_t Headers[] = { {"Accept", HDR_ACCEPT, ftPChar}, {"Age", HDR_AGE, ftInt}, - {"Cache-Control", HDR_CACHE_CONTROL, ftPSCC}, + {"Cache-Control", HDR_CACHE_CONTROL, ftPCc}, {"Connection", HDR_CONNECTION, ftPChar}, /* for now */ {"Content-Encoding", HDR_CONTENT_ENCODING, ftPChar}, {"Content-Length", HDR_CONTENT_LENGTH, ftInt}, @@ -115,14 +115,14 @@ static field_attrs_t Headers[] = {"Location", HDR_LOCATION, ftPChar}, {"Max-Forwards", HDR_MAX_FORWARDS, ftInt}, {"Proxy-Authenticate", HDR_PROXY_AUTHENTICATE, ftPChar}, + {"Proxy-Connection", HDR_PROXY_KEEPALIVE, ftInt}, /* true/false */ {"Public", HDR_PUBLIC, ftPChar}, + {"Range", HDR_RANGE, ftPRange}, {"Retry-After", HDR_RETRY_AFTER, ftPChar}, /* for now */ - /* fix this: make count-but-treat as OTHER mask @?@ @?@ */ - {"Set-Cookie:", HDR_SET_COOKIE, ftPChar}, + {"Set-Cookie", HDR_SET_COOKIE, ftPChar}, {"Upgrade", HDR_UPGRADE, ftPChar}, /* for now */ {"Warning", HDR_WARNING, ftPChar}, /* for now */ {"WWW-Authenticate", HDR_WWW_AUTHENTICATE, ftPChar}, - {"Proxy-Connection", HDR_PROXY_KEEPALIVE, ftInt}, /* true/false */ {"Other:", HDR_OTHER, ftPExtField} /* ':' will not allow matches */ }; @@ -808,6 +808,12 @@ httpHeaderGetCc(const HttpHeader * hdr) return httpHeaderGet(hdr, HDR_CACHE_CONTROL).v_pcc; } +HttpHdrRange * +httpHeaderGetRange(const HttpHeader * hdr) +{ + return httpHeaderGet(hdr, HDR_RANGE).v_prange; +} + /* updates header masks */ static void httpHeaderSyncMasks(HttpHeader * hdr, const HttpHeaderEntry * e, int add) @@ -873,10 +879,14 @@ httpHeaderEntryClean(HttpHeaderEntry * e) case ftPChar: freeShortString(e->field.v_pchar); break; - case ftPSCC: + case ftPCc: if (e->field.v_pcc) httpHdrCcDestroy(e->field.v_pcc); break; + case ftPRange: + if (e->field.v_prange) + 
httpHdrRangeDestroy(e->field.v_prange); + break; case ftPExtField: if (e->field.v_pefield) httpHeaderExtFieldDestroy(e->field.v_pefield); @@ -1000,10 +1010,20 @@ httpHeaderEntryParseByTypeInit(HttpHeaderEntry * e, int id, const HttpHeaderExtF */ break; - case ftPSCC: + case ftPCc: field.v_pcc = httpHdrCcParseCreate(f->value); if (!field.v_pcc) { - debug(55, 2) ("failed to parse scc hdr: id: %d, field: '%s: %s'\n", + debug(55, 2) ("failed to parse cc hdr: id: %d, field: '%s: %s'\n", + id, f->name, f->value); + Headers[id].stat.errCount++; + return 0; + } + break; + + case ftPRange: + field.v_prange = httpHdrRangeParseCreate(f->value); + if (!field.v_prange) { + debug(55, 2) ("failed to parse range hdr: id: %d, field: '%s: %s'\n", id, f->name, f->value); Headers[id].stat.errCount++; return 0; @@ -1073,9 +1093,12 @@ httpHeaderEntryPackByType(const HttpHeaderEntry * e, Packer * p) case ftDate_1123: packerPrintf(p, "%s", mkrfc1123(e->field.v_time)); break; - case ftPSCC: + case ftPCc: httpHdrCcPackValueInto(e->field.v_pcc, p); break; + case ftPRange: + httpHdrRangePackValueInto(e->field.v_prange, p); + break; case ftPExtField: packerPrintf(p, "%s", e->field.v_pefield->value); break; @@ -1099,9 +1122,12 @@ httpHeaderEntryJoinWith(HttpHeaderEntry * e, const HttpHeaderEntry * newe) case ftPChar: e->field.v_pchar = appShortStr(e->field.v_pchar, newe->field.v_pchar); break; - case ftPSCC: + case ftPCc: httpHdrCcJoinWith(e->field.v_pcc, newe->field.v_pcc); break; + case ftPRange: + httpHdrRangeJoinWith(e->field.v_prange, newe->field.v_prange); + break; default: debug(55, 0) ("join for invalid/unknown type: id: %d, type: %d\n", e->id, type); assert(0); @@ -1126,8 +1152,10 @@ httpHeaderEntryIsValid(const HttpHeaderEntry * e) return e->field.v_pchar != NULL; case ftDate_1123: return e->field.v_time >= 0; - case ftPSCC: + case ftPCc: return e->field.v_pcc != NULL; + case ftPRange: + return e->field.v_prange != NULL; case ftPExtField: return e->field.v_pefield != NULL; default: @@ -1167,8 +1195,10 @@ httpHeaderFieldDup(field_type type, field_store value) return value; case ftPChar: return ptrField(dupShortStr(value.v_pchar)); - case ftPSCC: + case ftPCc: return ptrField(httpHdrCcDup(value.v_pcc)); + case ftPRange: + return ptrField(httpHdrRangeDup(value.v_prange)); case ftPExtField: return ptrField(httpHeaderExtFieldDup(value.v_pefield)); default: @@ -1190,7 +1220,8 @@ httpHeaderFieldBadValue(field_type type) case ftDate_1123: return timeField(-1); case ftPChar: - case ftPSCC: + case ftPCc: + case ftPRange: case ftPExtField: return ptrField(NULL); case ftInvalid: @@ -1200,136 +1231,6 @@ httpHeaderFieldBadValue(field_type type) return ptrField(NULL); /* not reached */ } -#if 0 /* moved to HttpHdrCC.c */ - -/* - * HttpScc (server cache control) - */ - -static HttpScc * -httpSccCreate() -{ - HttpScc *scc = memAllocate(MEM_HTTP_SCC); - scc->max_age = -1; - return scc; -} - -/* creates an scc object from a 0-terminating string */ -static HttpScc * -httpSccParseCreate(const char *str) -{ - HttpScc *scc = httpSccCreate(); - httpSccParseInit(scc, str); - return scc; -} - -/* parses a 0-terminating string and inits scc */ -static void -httpSccParseInit(HttpScc * scc, const char *str) -{ - const char *item; - const char *p; /* '=' parameter */ - const char *pos = NULL; - int type; - int ilen; - assert(scc && str); - - CcPasredCount++; - /* iterate through comma separated list */ - while (strListGetItem(str, ',', &item, &ilen, &pos)) { - /* strip '=' statements @?@ */ - if ((p = strchr(item, '=')) && (p - item < 
ilen)) - ilen = p++ - item; - /* find type */ - type = httpHeaderIdByName(item, ilen, - SccAttrs, SCC_ENUM_END, -1); - if (type < 0) { - debug(55, 2) ("cc: unknown cache-directive: near '%s' in '%s'\n", item, str); - continue; - } - if (EBIT_TEST(scc->mask, type)) { - debug(55, 2) ("cc: ignoring duplicate cache-directive: near '%s' in '%s'\n", item, str); - SccAttrs[type].stat.repCount++; - continue; - } - /* update mask */ - EBIT_SET(scc->mask, type); - /* post-processing special cases */ - switch (type) { - case SCC_MAX_AGE: - if (p) - scc->max_age = (time_t) atoi(p); - if (scc->max_age < 0) { - debug(55, 2) ("scc: invalid max-age specs near '%s'\n", item); - scc->max_age = -1; - EBIT_CLR(scc->mask, type); - } - break; - default: - /* note that we ignore most of '=' specs @?@ */ - break; - } - } - return; -} - -static void -httpSccDestroy(HttpScc * scc) -{ - assert(scc); - memFree(MEM_HTTP_SCC, scc); -} - -static HttpScc * -httpSccDup(HttpScc * scc) -{ - HttpScc *dup; - assert(scc); - dup = httpSccCreate(); - dup->mask = scc->mask; - dup->max_age = scc->max_age; - return dup; -} - -static void -httpSccPackValueInto(HttpScc * scc, Packer * p) -{ - http_scc_type flag; - int pcount = 0; - assert(scc && p); - if (scc->max_age >= 0) { - packerPrintf(p, "max-age=%d", scc->max_age); - pcount++; - } - for (flag = 0; flag < SCC_ENUM_END; flag++) { - if (EBIT_TEST(scc->mask, flag)) { - packerPrintf(p, pcount ? ", %s" : "%s", SccAttrs[flag].name); - pcount++; - } - } -} - -static void -httpSccJoinWith(HttpScc * scc, HttpScc * new_scc) -{ - assert(scc && new_scc); - if (scc->max_age < 0) - scc->max_age = new_scc->max_age; - scc->mask |= new_scc->mask; -} - -static void -httpSccUpdateStats(const HttpScc * scc, StatHist * hist) -{ - http_scc_type c; - assert(scc); - for (c = 0; c < SCC_ENUM_END; c++) - if (EBIT_TEST(scc->mask, c)) - statHistCount(hist, c); -} - -#endif - /* * HttpHeaderExtField */ @@ -1405,19 +1306,6 @@ httpHeaderFieldStatDumper(StoreEntry * sentry, int idx, double val, double size, id, name, count, xdiv(count, HeaderParsedCount)); } -#if 0 -static void -httpHeaderCCStatDumper(StoreEntry * sentry, int idx, double val, double size, int count) -{ - const int id = (int) val; - const int valid_id = id >= 0 && id < SCC_ENUM_END; - const char *name = valid_id ? SccAttrs[id].name : "INVALID"; - if (count || valid_id) - storeAppendPrintf(sentry, "%2d\t %-20s\t %5d\t %6.2f\n", - id, name, count, xdiv(count, CcPasredCount)); -} -#endif - static void httpHeaderFldsPerHdrDumper(StoreEntry * sentry, int idx, double val, double size, int count) { @@ -1560,55 +1448,3 @@ freeShortString(char *str) } } -#if 0 -/* - * other routines (move these into lib if you need them somewhere else?) - */ - -/* - * iterates through a 0-terminated string of items separated by 'del's. - * white space around 'del' is considered to be a part of 'del' - * like strtok, but preserves the source. - * - * returns true if next item is found. - * init pos with NULL to start iteration. 
- */ -static int -strListGetItem(const char *str, char del, const char **item, int *ilen, const char **pos) -{ - size_t len; - assert(str && item && pos); - if (*pos) - if (!**pos) /* end of string */ - return 0; - else - (*pos)++; - else - *pos = str; - - /* skip leading ws (ltrim) */ - *pos += xcountws(*pos); - *item = *pos; /* remember item's start */ - /* find next delimiter */ - *pos = strchr(*item, del); - if (!*pos) /* last item */ - *pos = *item + strlen(*item); - len = *pos - *item; /* *pos points to del or '\0' */ - /* rtrim */ - while (len > 0 && isspace((*item)[len - 1])) - len--; - if (ilen) - *ilen = len; - return len > 0; -} - -/* handy to printf prefixes of potentially very long buffers */ -static const char * -getStringPrefix(const char *str) -{ -#define SHORT_PREFIX_SIZE 256 - LOCAL_ARRAY(char, buf, SHORT_PREFIX_SIZE); - xstrncpy(buf, str, SHORT_PREFIX_SIZE); - return buf; -} -#endif diff --git a/src/Makefile.in b/src/Makefile.in index 66b811f1e9..9f73ad5182 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -1,7 +1,7 @@ # # Makefile for the Squid Object Cache server # -# $Id: Makefile.in,v 1.129 1998/03/06 05:43:33 kostas Exp $ +# $Id: Makefile.in,v 1.130 1998/03/07 23:42:58 rousskov Exp $ # # Uncomment and customize the following to suit your needs: # @@ -124,6 +124,7 @@ OBJS = \ ssl.o \ stat.o \ StatHist.o \ + String.o \ stmem.o \ store.o \ store_clean.o \ diff --git a/src/acl.cc b/src/acl.cc index 249ab66ffe..ff2ac612c6 100644 --- a/src/acl.cc +++ b/src/acl.cc @@ -1,6 +1,6 @@ /* - * $Id: acl.cc,v 1.146 1998/03/06 21:05:47 wessels Exp $ + * $Id: acl.cc,v 1.147 1998/03/07 23:43:00 rousskov Exp $ * * DEBUG: section 28 Access Control * AUTHOR: Duane Wessels @@ -825,31 +825,28 @@ aclParseAclLine(acl ** head) *head = A; } -/* maex@space.net (06.09.96) - * get (if any) the URL from deny_info for a certain acl - */ - -char * -aclGetDenyInfoUrl(acl_deny_info_list ** head, const char *name) +/* does name lookup, returns page_id */ +int +aclGetDenyInfoPage(acl_deny_info_list ** head, const char *name) { acl_deny_info_list *A = NULL; acl_name_list *L = NULL; A = *head; if (NULL == *head) /* empty list */ - return (NULL); + return -1; while (A) { L = A->acl_list; if (NULL == L) /* empty list should never happen, but in case */ continue; while (L) { if (!strcmp(name, L->name)) - return (A->url); + return A->err_page_id; L = L->next; } A = A->next; } - return (NULL); + return -1; } /* maex@space.net (05.09.96) @@ -871,15 +868,20 @@ aclParseDenyInfoLine(acl_deny_info_list ** head) acl_name_list *L = NULL; acl_name_list **Tail = NULL; - /* first expect an url */ + /* first expect a page name */ if ((t = strtok(NULL, w_space)) == NULL) { debug(28, 0) ("%s line %d: %s\n", cfg_filename, config_lineno, config_input_line); - debug(28, 0) ("aclParseDenyInfoLine: missing 'url' parameter.\n"); + debug(28, 0) ("aclParseDenyInfoLine: missing 'error page' parameter.\n"); return; } A = xcalloc(1, sizeof(acl_deny_info_list)); + A->err_page_id = errorReservePageId(t); +#if 0 xstrncpy(A->url, t, MAX_URL); +#else + A->err_page_name = xstrdup(t); +#endif A->next = (acl_deny_info_list *) NULL; /* next expect a list of ACL names */ Tail = &A->acl_list; @@ -1325,7 +1327,7 @@ aclMatchAcl(acl * acl, aclCheck_t * checklist) return aclMatchTime(acl->data, squid_curtime); /* NOTREACHED */ case ACL_URLPATH_REGEX: - return aclMatchRegex(acl->data, r->urlpath); + return aclMatchRegex(acl->data, strBuf(r->urlpath)); /* NOTREACHED */ case ACL_URL_REGEX: return aclMatchRegex(acl->data, urlCanonical(r, NULL)); @@ 
-1774,6 +1776,7 @@ aclDestroyDenyInfoList(acl_deny_info_list ** list) safe_free(l); } a_next = a->next; + xfree(a->err_page_name); safe_free(a); } *list = NULL; diff --git a/src/asn.cc b/src/asn.cc index ea78260f3b..aed729e0be 100644 --- a/src/asn.cc +++ b/src/asn.cc @@ -1,5 +1,5 @@ /* - * $Id: asn.cc,v 1.24 1998/03/07 00:02:14 wessels Exp $ + * $Id: asn.cc,v 1.25 1998/03/07 23:43:00 rousskov Exp $ * * DEBUG: section 53 AS Number handling * AUTHOR: Duane Wessels, Kostas Anagnostakis @@ -381,8 +381,8 @@ whoisConnectDone(int fd, int status, void *data) comm_close(fd); return; } - snprintf(buf, 128, "%s\r\n", p->request->urlpath + 1); - debug(53, 1) ("whoisConnectDone: FD %d, '%s'\n", fd, p->request->urlpath + 1); + snprintf(buf, 128, "%s\r\n", strBuf(p->request->urlpath) + 1); + debug(53, 1) ("whoisConnectDone: FD %d, '%s'\n", fd, strBuf(p->request->urlpath) + 1); comm_write(fd, xstrdup(buf), strlen(buf), NULL, p, xfree); commSetSelect(fd, COMM_SELECT_READ, whoisReadReply, p, Config.Timeout.read); } diff --git a/src/cache_cf.cc b/src/cache_cf.cc index 6ce2095ffa..3c2d64e253 100644 --- a/src/cache_cf.cc +++ b/src/cache_cf.cc @@ -1,6 +1,6 @@ /* - * $Id: cache_cf.cc,v 1.256 1998/03/05 17:43:32 rousskov Exp $ + * $Id: cache_cf.cc,v 1.257 1998/03/07 23:43:01 rousskov Exp $ * * DEBUG: section 3 Configuration File Parsing * AUTHOR: Harvest Derived @@ -823,7 +823,7 @@ dump_denyinfo(StoreEntry * entry, const char *name, acl_deny_info_list * var) { acl_name_list *a; while (var != NULL) { - storeAppendPrintf(entry, "%s %s", name, var->url); + storeAppendPrintf(entry, "%s %s", name, var->err_page_name); for (a = var->acl_list; a != NULL; a = a->next) storeAppendPrintf(entry, " %s", a->name); storeAppendPrintf(entry, "\n"); diff --git a/src/cf.data.pre b/src/cf.data.pre index b657232f1e..969e469374 100644 --- a/src/cf.data.pre +++ b/src/cf.data.pre @@ -1579,16 +1579,16 @@ TYPE: denyinfo LOC: Config.denyInfoList DEFAULT: none DOC_START - Usage: deny_info URL acl + Usage: deny_info err_page_name acl + Example: deny_info ERR_CUSTOM_ACCESS_DENIED bad_guys - This can be used to return a HTTP redirect for requests which + This can be used to return a ERR_ page for requests which do not pass the 'http_access' rules. A single ACL will cause the http_access check to fail. If a 'deny_info' line exists - for that ACL then Squid returns a redirect to the given URL. + for that ACL then Squid returns a corresponding error page. - Be sure to make an exception for the site you are referring - people to with the 'acl dstdomain' function, otherwise they - are going to be refused access to the redirected URL too. + You may use ERR_ pages that come with Squid or create your own pages + and put them into the configured errors/ directory. 
DOC_END NAME: memory_pools diff --git a/src/client_side.cc b/src/client_side.cc index 15e7fde39d..ede57c8a8b 100644 --- a/src/client_side.cc +++ b/src/client_side.cc @@ -1,6 +1,6 @@ /* - * $Id: client_side.cc,v 1.222 1998/03/06 22:19:31 wessels Exp $ + * $Id: client_side.cc,v 1.223 1998/03/07 23:43:03 rousskov Exp $ * * DEBUG: section 33 Client-side Routines * AUTHOR: Duane Wessels @@ -146,7 +146,11 @@ void clientAccessCheckDone(int answer, void *data) { clientHttpRequest *http = data; +#if 0 char *redirectUrl = NULL; +#else + int page_id = -1; +#endif ErrorState *err = NULL; HttpReply *rep; debug(33, 5) ("clientAccessCheckDone: '%s' answer=%d\n", http->uri, answer); @@ -167,23 +171,18 @@ clientAccessCheckDone(int answer, void *data) httpReplyDestroy(rep); } else { debug(33, 5) ("Access Denied: %s\n", http->uri); + debug(33, 5) ("AclMatchedName = %s\n", AclMatchedName ? AclMatchedName : ""); http->log_type = LOG_TCP_DENIED; http->entry = clientCreateStoreEntry(http, http->request->method, 0); - redirectUrl = aclGetDenyInfoUrl(&Config.denyInfoList, AclMatchedName); - if (redirectUrl) { - err = errorCon(ERR_ACCESS_DENIED, HTTP_MOVED_TEMPORARILY); - err->request = requestLink(http->request); - err->src_addr = http->conn->peer.sin_addr; - err->redirect_url = xstrdup(redirectUrl); - errorAppendEntry(http->entry, err); - } else { - /* NOTE: don't use HTTP_UNAUTHORIZED because then the - * stupid browser wants us to authenticate */ - err = errorCon(ERR_ACCESS_DENIED, HTTP_FORBIDDEN); - err->request = requestLink(http->request); - err->src_addr = http->conn->peer.sin_addr; - errorAppendEntry(http->entry, err); - } + page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName); + /* NOTE: don't use HTTP_UNAUTHORIZED because then the + * stupid browser wants us to authenticate */ + err = errorCon(ERR_ACCESS_DENIED, HTTP_FORBIDDEN); + err->request = requestLink(http->request); + err->src_addr = http->conn->peer.sin_addr; + if (page_id > 0) + err->page_id = page_id; + errorAppendEntry(http->entry, err); } } diff --git a/src/defines.h b/src/defines.h index 5894a17a9b..d0c9fb3fcd 100644 --- a/src/defines.h +++ b/src/defines.h @@ -212,3 +212,6 @@ /* gb_type operations */ #define gb_flush_limit (0x3FFFFFFF) #define gb_inc(gb, delta) { if ((gb)->bytes > gb_flush_limit || delta > gb_flush_limit) gb_flush(gb); (gb)->bytes += delta; (gb)->count++; } + +/* iteration for HttpHdrRange */ +#define HttpHdrRangeInitPos (-1) diff --git a/src/enums.h b/src/enums.h index f382b26d06..1a3828b9b9 100644 --- a/src/enums.h +++ b/src/enums.h @@ -185,6 +185,7 @@ typedef enum { HDR_MAX_FORWARDS, HDR_PROXY_AUTHENTICATE, HDR_PUBLIC, + HDR_RANGE, HDR_RETRY_AFTER, HDR_SET_COOKIE, HDR_UPGRADE, @@ -214,7 +215,8 @@ typedef enum { ftInt, ftPChar, ftDate_1123, - ftPSCC, + ftPCc, + ftPRange, ftPExtField } field_type; diff --git a/src/errorpage.cc b/src/errorpage.cc index 6d15004904..73c672ae7a 100644 --- a/src/errorpage.cc +++ b/src/errorpage.cc @@ -1,6 +1,6 @@ /* - * $Id: errorpage.cc,v 1.120 1998/03/06 05:43:35 kostas Exp $ + * $Id: errorpage.cc,v 1.121 1998/03/07 23:43:05 rousskov Exp $ * * DEBUG: section 4 Error Generation * AUTHOR: Duane Wessels @@ -38,27 +38,42 @@ #include "squid.h" + +/* local types */ + +typedef struct { + int id; + char *page_name; +} ErrorDynamicPageInfo; + +/* local constant and vars */ + /* * note: hard coded error messages are not appended with %S automagically * to give you more control on the format */ static const struct { - err_type type; + int type; /* and page_id */ const char *text; } 
error_hard_text[] = { - { ERR_SQUID_SIGNATURE, "\n
\n" "
\n" - "Generated on %T by %s@%h" + "Generated on %T by %s@%h\n" } }; + +static Stack ErrorDynamicPages; + +/* local prototypes */ + static const int error_hard_text_count = sizeof(error_hard_text) / sizeof(*error_hard_text); -static char *error_text[ERR_MAX]; +static char **error_text = NULL; +static int error_page_count = 0; -static char *errorTryLoadText(err_type type, const char *dir); -static char *errorLoadText(err_type type); +static char *errorTryLoadText(const char *page_name, const char *dir); +static char *errorLoadText(const char *page_name); static const char *errorFindHardText(err_type type); static MemBuf errorBuildContent(ErrorState * err); static const char *errorConvert(char token, ErrorState * err); @@ -76,16 +91,25 @@ static CWCB errorSendComplete; void errorInitialize(void) { - err_type i; + int i; const char *text; - /* find this one first so we can append it to others in errorTryLoadText() */ - for (i = ERR_NONE + 1; i < ERR_MAX; i++) { + error_page_count = ERR_MAX + ErrorDynamicPages.count; + error_text = xcalloc(error_page_count, sizeof(char*)); + for (i = ERR_NONE + 1; i < error_page_count; i++) { safe_free(error_text[i]); /* hard-coded ? */ if ((text = errorFindHardText(i))) error_text[i] = xstrdup(text); else - error_text[i] = errorLoadText(i); + /* precompiled ? */ + if (i < ERR_MAX) + error_text[i] = errorLoadText(err_type_str[i]); + /* dynamic */ + else { + ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i-ERR_MAX]; + assert(info && info->id == i && info->page_name); + error_text[i] = errorLoadText(info->page_name); + } assert(error_text[i]); } } @@ -102,13 +126,13 @@ errorFindHardText(err_type type) static char * -errorLoadText(err_type type) +errorLoadText(const char *page_name) { /* test configured location */ - char *text = errorTryLoadText(type, Config.errorDirectory); + char *text = errorTryLoadText(page_name, Config.errorDirectory); /* test default location if failed */ if (!text && strcmp(Config.errorDirectory, DEFAULT_SQUID_ERROR_DIR)) - text = errorTryLoadText(type, DEFAULT_SQUID_ERROR_DIR); + text = errorTryLoadText(page_name, DEFAULT_SQUID_ERROR_DIR); /* giving up if failed */ if (!text) fatal("failed to find or read error text file."); @@ -116,7 +140,7 @@ errorLoadText(err_type type) } static char * -errorTryLoadText(err_type type, const char *dir) +errorTryLoadText(const char *page_name, const char *dir) { int fd; char path[MAXPATHLEN]; @@ -124,7 +148,7 @@ errorTryLoadText(err_type type, const char *dir) char *text; snprintf(path, MAXPATHLEN, "%s/%s", - dir, err_type_str[type]); + dir, page_name); fd = file_open(path, O_RDONLY, NULL, NULL, NULL); if (fd < 0 || fstat(fd, &sb) < 0) { debug(4, 0) ("errorTryLoadText: '%s': %s\n", path, xstrerror()); @@ -144,12 +168,42 @@ errorTryLoadText(err_type type, const char *dir) return text; } +static ErrorDynamicPageInfo * +errorDynamicPageInfoCreate(int id, const char *page_name) +{ + ErrorDynamicPageInfo *info = xcalloc(1, sizeof(ErrorDynamicPageInfo)); + info->id = id; + info->page_name = xstrdup(page_name); + return info; +} + +static void +errorDynamicPageInfoDestroy(ErrorDynamicPageInfo *info) +{ + assert(info); + xfree(info->page_name); + xfree(info); +} + +int +errorReservePageId(const char *page_name) +{ + ErrorDynamicPageInfo *info = + errorDynamicPageInfoCreate(ERR_MAX + ErrorDynamicPages.count, page_name); + stackPush(&ErrorDynamicPages, info); + return info->id; +} + void errorFree(void) { int i; - for (i = ERR_NONE + 1; i < ERR_MAX; i++) + for (i = ERR_NONE + 1; i < error_page_count; i++) 
safe_free(error_text[i]); + while (ErrorDynamicPages.count) + errorDynamicPageInfoDestroy(stackPop(&ErrorDynamicPages)); + safe_free(error_text); + error_page_count = 0; } /* @@ -158,9 +212,10 @@ errorFree(void) * Abstract: This function creates a ErrorState object. */ ErrorState * -errorCon(err_type type, http_status status) +errorCon(int type, http_status status) { ErrorState *err = xcalloc(1, sizeof(ErrorState)); + err->page_id = type; /* has to be reset manually if needed */ err->type = type; err->http_status = status; return err; @@ -183,26 +238,16 @@ errorCon(err_type type, http_status status) void errorAppendEntry(StoreEntry * entry, ErrorState * err) { -#if 0 - const char *buf; - int len; -#else HttpReply *rep; -#endif MemObject *mem = entry->mem_obj; #if 0 /* we might have an ok store for put etc */ assert(entry->store_status == STORE_PENDING); #endif assert(mem != NULL); assert(mem->inmem_hi == 0); -#if 0 - buf = errorBuildBuf(err, &len); - storeAppend(entry, buf, len); -#else rep = errorBuildReply(err); httpReplySwapOut(rep, entry); httpReplyDestroy(rep); -#endif mem->reply->sline.status = err->http_status; storeComplete(entry); storeNegativeCache(entry); @@ -233,11 +278,6 @@ void errorSend(int fd, ErrorState * err) { HttpReply *rep; -#if 0 - FREE *freefunc; - char *buf; - int len; -#endif debug(4, 3) ("errorSend: FD %d, err=%p\n", fd, err); assert(fd >= 0); /* @@ -249,14 +289,9 @@ errorSend(int fd, ErrorState * err) /* moved in front of errorBuildBuf @?@ */ EBIT_SET(err->flags, ERR_FLAG_CBDATA); cbdataAdd(err, MEM_NONE); -#if 0 - buf = errorBuildBuf(err, &len); - comm_write(fd, xstrdup(buf), len, errorSendComplete, err, xfree); -#else rep = errorBuildReply(err); comm_write_mbuf(fd, httpReplyPack(rep), errorSendComplete, err); httpReplyDestroy(rep); -#endif } /* @@ -401,14 +436,14 @@ errorConvert(char token, ErrorState * err) break; case 'S': /* signature may contain %-escapes, recursion */ - if (err->type != ERR_SQUID_SIGNATURE) { - const err_type saved_et = err->type; + if (err->page_id != ERR_SQUID_SIGNATURE) { + const int saved_id = err->page_id; MemBuf mb; - err->type = ERR_SQUID_SIGNATURE; + err->page_id = ERR_SQUID_SIGNATURE; mb = errorBuildContent(err); snprintf(buf, CVT_BUF_SZ, "%s", mb.buf); memBufClean(&mb); - err->type = saved_et; + err->page_id = saved_id; } else { /* wow, somebody put %S into ERR_SIGNATURE, stop recursion */ p = "[%S]"; @@ -464,114 +499,22 @@ static MemBuf errorBuildContent(ErrorState * err) { MemBuf content; -#if 0 /* use MemBuf so we can support recursion; const pointers: no xstrdup */ - LOCAL_ARRAY(char, content, ERROR_BUF_SZ); - int clen; - char *m; - char *mx; - char *p; -#endif const char *m; const char *p; const char *t; assert(err != NULL); - assert(err->type > ERR_NONE && err->type < ERR_MAX); -#if 0 /* use MemBuf so we can support recursion */ - mx = m = xstrdup(error_text[err->type]); -#endif + assert(err->page_id > ERR_NONE && err->page_id < error_page_count); memBufDefInit(&content); - m = error_text[err->type]; + m = error_text[err->page_id]; assert(m); while ((p = strchr(m, '%'))) { -#if 0 /* use MemBuf so we can support recursion */ - *p = '\0'; /* terminate */ - xstrncpy(content + clen, m, ERROR_BUF_SZ - clen); /* copy */ - clen += (p - m); /* advance */ - if (clen >= ERROR_BUF_SZ) - break; - p++; - m = p + 1; - t = errorConvert(*p, err); /* convert */ - xstrncpy(content + clen, t, ERROR_BUF_SZ - clen); /* copy */ - clen += strlen(t); /* advance */ - if (clen >= ERROR_BUF_SZ) - break; -#endif memBufAppend(&content, m, p - m); /* 
copy */ t = errorConvert(*++p, err); /* convert */ memBufPrintf(&content, "%s", t); /* copy */ m = p + 1; /* advance */ } -#if 0 /* use MemBuf so we can support recursion */ - if (clen < ERROR_BUF_SZ && m != NULL) { - xstrncpy(content + clen, m, ERROR_BUF_SZ - clen); - clen += strlen(m); - } - if (clen >= ERROR_BUF_SZ) { - clen = ERROR_BUF_SZ - 1; - *(content + clen) = '\0'; - } - assert(clen == strlen(content)); - if (len) - *len = clen; - xfree(mx); -#endif if (*m) memBufPrintf(&content, "%s", m); /* copy tail */ assert(content.size == strlen(content.buf)); return content; } - -#if 0 /* we use httpReply instead of a buffer now */ -const char * -errorBuildBuf(ErrorState * err, int *len) -{ - LOCAL_ARRAY(char, buf, ERROR_BUF_SZ); - LOCAL_ARRAY(char, content, ERROR_BUF_SZ); - char *hdr; - int clen; - int tlen; - char *m; - char *mx; - char *p; - const char *t; - assert(err != NULL); - assert(err->type > ERR_NONE && err->type < ERR_MAX); - mx = m = xstrdup(error_text[err->type]); - clen = 0; - while ((p = strchr(m, '%'))) { - *p = '\0'; /* terminate */ - xstrncpy(content + clen, m, ERROR_BUF_SZ - clen); /* copy */ - clen += (p - m); /* advance */ - if (clen >= ERROR_BUF_SZ) - break; - p++; - m = p + 1; - t = errorConvert(*p, err); /* convert */ - xstrncpy(content + clen, t, ERROR_BUF_SZ - clen); /* copy */ - clen += strlen(t); /* advance */ - if (clen >= ERROR_BUF_SZ) - break; - } - if (clen < ERROR_BUF_SZ && m != NULL) { - xstrncpy(content + clen, m, ERROR_BUF_SZ - clen); - clen += strlen(m); - } - if (clen >= ERROR_BUF_SZ) { - clen = ERROR_BUF_SZ - 1; - *(content + clen) = '\0'; - } - assert(clen == strlen(content)); - hdr = httpReplyHeader((double) 1.0, - err->http_status, - "text/html", - clen, - 0, /* no LMT for error pages */ - squid_curtime); - tlen = snprintf(buf, ERROR_BUF_SZ, "%s\r\n%s", hdr, content); - if (len) - *len = tlen; - xfree(mx); - return buf; -} -#endif diff --git a/src/ftp.cc b/src/ftp.cc index f8d3301c29..599a69cd5b 100644 --- a/src/ftp.cc +++ b/src/ftp.cc @@ -1,6 +1,6 @@ /* - * $Id: ftp.cc,v 1.203 1998/03/06 23:22:27 wessels Exp $ + * $Id: ftp.cc,v 1.204 1998/03/07 23:43:06 rousskov Exp $ * * DEBUG: section 9 File Transfer Protocol (FTP) * AUTHOR: Harvest Derived @@ -866,24 +866,24 @@ ftpCheckUrlpath(FtpStateData * ftpState) { request_t *request = ftpState->request; int l; - char *t; - if ((t = strrchr(request->urlpath, ';')) != NULL) { + const char *t; + if ((t = strRChr(request->urlpath, ';')) != NULL) { if (strncasecmp(t + 1, "type=", 5) == 0) { ftpState->typecode = (char) toupper((int) *(t + 6)); - *t = '\0'; + strSet(request->urlpath, t, '\0'); } } - l = strlen(request->urlpath); + l = strLen(request->urlpath); EBIT_SET(ftpState->flags, FTP_USE_BASE); /* check for null path */ - if (*request->urlpath == '\0') { - xstrncpy(request->urlpath, "/", MAX_URL); + if (!l) { + stringReset(&request->urlpath, "/"); EBIT_SET(ftpState->flags, FTP_ISDIR); EBIT_SET(ftpState->flags, FTP_ROOT_DIR); - } else if (!strcmp(request->urlpath, "/%2f/")) { + } else if (!strCmp(request->urlpath, "/%2f/")) { EBIT_SET(ftpState->flags, FTP_ISDIR); EBIT_SET(ftpState->flags, FTP_ROOT_DIR); - } else if ((l >= 1) && (*(request->urlpath + l - 1) == '/')) { + } else if ((l >= 1) && (*(strBuf(request->urlpath) + l - 1) == '/')) { EBIT_SET(ftpState->flags, FTP_ISDIR); EBIT_CLR(ftpState->flags, FTP_USE_BASE); if (l == 1) @@ -901,7 +901,7 @@ ftpBuildTitleUrl(FtpStateData * ftpState) + strlen(ftpState->user) + strlen(ftpState->password) + strlen(request->host) - + strlen(request->urlpath); + + 
strLen(request->urlpath); t = ftpState->title_url = xcalloc(len, 1); strcat(t, "ftp://"); if (strcmp(ftpState->user, "anonymous")) { @@ -911,7 +911,7 @@ ftpBuildTitleUrl(FtpStateData * ftpState) strcat(t, request->host); if (request->port != urlDefaultPort(PROTO_FTP)) snprintf(&t[strlen(t)], len - strlen(t), ":%d", request->port); - strcat(t, request->urlpath); + strcat(t, strBuf(request->urlpath)); } void @@ -957,7 +957,7 @@ ftpStart(request_t * request, StoreEntry * entry) ftpCheckUrlpath(ftpState); ftpBuildTitleUrl(ftpState); debug(9, 5) ("FtpStart: host=%s, path=%s, user=%s, passwd=%s\n", - ftpState->request->host, ftpState->request->urlpath, + ftpState->request->host, strBuf(ftpState->request->urlpath), ftpState->user, ftpState->password); fd = comm_open(SOCK_STREAM, 0, @@ -1260,8 +1260,8 @@ ftpReadPass(FtpStateData * ftpState) static void ftpSendType(FtpStateData * ftpState) { - char *t; - char *filename; + const char *t; + const char *filename; char mode; /* * Ref section 3.2.2 of RFC 1738 @@ -1277,8 +1277,8 @@ ftpSendType(FtpStateData * ftpState) if (EBIT_TEST(ftpState->flags, FTP_ISDIR)) { mode = 'A'; } else { - t = strrchr(ftpState->request->urlpath, '/'); - filename = t ? t + 1 : ftpState->request->urlpath; + t = strRChr(ftpState->request->urlpath, '/'); + filename = t ? t + 1 : strBuf(ftpState->request->urlpath); mode = mimeGetTransferMode(filename); } break; @@ -1300,7 +1300,7 @@ ftpReadType(FtpStateData * ftpState) char *d; debug(9, 3) ("This is ftpReadType\n"); if (code == 200) { - path = xstrdup(ftpState->request->urlpath); + path = xstrdup(strBuf(ftpState->request->urlpath)); T = &ftpState->pathcomps; for (d = strtok(path, "/"); d; d = strtok(NULL, "/")) { rfc1738_unescape(d); @@ -1952,7 +1952,7 @@ ftpTrySlashHack(FtpStateData * ftpState) wordlistDestroy(&ftpState->pathcomps); safe_free(ftpState->filepath); /* Build the new path (urlpath begins with /) */ - path = xstrdup(ftpState->request->urlpath); + path = xstrdup(strBuf(ftpState->request->urlpath)); rfc1738_unescape(path); ftpState->filepath = path; /* And off we go */ @@ -2065,16 +2065,16 @@ ftpAppendSuccessHeader(FtpStateData * ftpState) { char *mime_type = NULL; char *mime_enc = NULL; - char *urlpath = ftpState->request->urlpath; - char *filename = NULL; - char *t = NULL; + String urlpath = ftpState->request->urlpath; + const char *filename = NULL; + const char *t = NULL; StoreEntry *e = ftpState->entry; http_reply *reply = e->mem_obj->reply; if (EBIT_TEST(ftpState->flags, FTP_HTTP_HEADER_SENT)) return; EBIT_SET(ftpState->flags, FTP_HTTP_HEADER_SENT); assert(e->mem_obj->inmem_hi == 0); - filename = (t = strrchr(urlpath, '/')) ? t + 1 : urlpath; + filename = (t = strRChr(urlpath, '/')) ? 
t + 1 : strBuf(urlpath); if (EBIT_TEST(ftpState->flags, FTP_ISDIR)) { mime_type = "text/html"; } else { @@ -2158,7 +2158,7 @@ ftpUrlWith2f(const request_t * request) request->host, portbuf, "/%2f", - request->urlpath); + strBuf(request->urlpath)); if ((t = strchr(buf, '?'))) *t = '\0'; return buf; diff --git a/src/globals.h b/src/globals.h index 7ca7a70c4e..20ca3bd769 100644 --- a/src/globals.h +++ b/src/globals.h @@ -1,6 +1,6 @@ /* - * $Id: globals.h,v 1.41 1998/03/06 22:19:35 wessels Exp $ + * $Id: globals.h,v 1.42 1998/03/07 23:43:07 rousskov Exp $ */ extern FILE *debug_log; /* NULL */ @@ -98,6 +98,7 @@ extern double request_failure_ratio; /* 0.0 */ extern int store_hash_buckets; /* 0 */ extern hash_table *store_table; /* NULL */ extern dlink_list store_list; +extern const String StringNull; /* { 0, 0, NULL } */ extern int hot_obj_count; /* 0 */ #ifdef HAVE_SYSLOG diff --git a/src/http.cc b/src/http.cc index 2b988be22a..ad2543d64b 100644 --- a/src/http.cc +++ b/src/http.cc @@ -1,6 +1,6 @@ /* - * $Id: http.cc,v 1.249 1998/03/07 05:48:38 wessels Exp $ + * $Id: http.cc,v 1.250 1998/03/07 23:43:07 rousskov Exp $ * * DEBUG: section 11 Hypertext Transfer Protocol (HTTP) * AUTHOR: Harvest Derived @@ -604,7 +604,7 @@ httpBuildRequestHeader(request_t * request, xstrncpy(viabuf, "Via: ", 4096); snprintf(ybuf, YBUF_SZ, "%s %s HTTP/1.0", RequestMethodStr[request->method], - *request->urlpath ? request->urlpath : "/"); + strLen(request->urlpath) ? strBuf(request->urlpath) : "/"); httpAppendRequestHeader(hdr_out, ybuf, &len, out_sz, 1); /* Add IMS header */ if (entry && entry->lastmod && request->method == METHOD_GET) { @@ -682,8 +682,8 @@ httpBuildRequestHeader(request_t * request, url = entry ? storeUrl(entry) : urlCanonical(orig_request, NULL); snprintf(ybuf, YBUF_SZ, "Cache-control: Max-age=%d", (int) getMaxAge(url)); httpAppendRequestHeader(hdr_out, ybuf, &len, out_sz, 1); - if (request->urlpath[0]) - assert(strstr(url, request->urlpath)); + if (strLen(request->urlpath)) + assert(strstr(url, strBuf(request->urlpath))); } /* maybe append Connection: Keep-Alive */ if (EBIT_TEST(flags, HTTP_KEEPALIVE)) { @@ -723,7 +723,7 @@ httpSendRequest(int fd, void *data) peer *p = httpState->peer; debug(11, 5) ("httpSendRequest: FD %d: httpState %p.\n", fd, httpState); - buflen = strlen(req->urlpath); + buflen = strLen(req->urlpath); if (req->headers) buflen += req->headers_sz + 1; buflen += 512; /* lots of extra */ @@ -813,7 +813,11 @@ httpBuildState(int fd, StoreEntry * entry, request_t * orig_request, peer * e) request->method = orig_request->method; xstrncpy(request->host, e->host, SQUIDHOSTNAMELEN); request->port = e->http_port; +#if 0 xstrncpy(request->urlpath, storeUrl(entry), MAX_URL); +#else + stringReset(&request->urlpath, storeUrl(entry)); +#endif httpState->request = requestLink(request); httpState->peer = e; httpState->orig_request = requestLink(orig_request); diff --git a/src/icp_v2.cc b/src/icp_v2.cc index aa95c75d86..870acc9037 100644 --- a/src/icp_v2.cc +++ b/src/icp_v2.cc @@ -250,8 +250,10 @@ icpHandleIcpV2(int fd, struct sockaddr_in from, char *buf, int len) header.opcode, inet_ntoa(from.sin_addr)); break; } - if (icp_request) + if (icp_request) { + stringClean(&icp_request->urlpath); memFree(MEM_REQUEST_T, icp_request); + } } #ifdef ICP_PKT_DUMP diff --git a/src/icp_v3.cc b/src/icp_v3.cc index f8dde47aa6..e657002e4b 100644 --- a/src/icp_v3.cc +++ b/src/icp_v3.cc @@ -112,6 +112,8 @@ icpHandleIcpV3(int fd, struct sockaddr_in from, char *buf, int len) header.opcode, inet_ntoa(from.sin_addr)); 
break; } - if (icp_request) + if (icp_request) { + stringClean(&icp_request->urlpath); memFree(MEM_REQUEST_T, icp_request); + } } diff --git a/src/protos.h b/src/protos.h index 2f8e89ec5f..8ac1519ca3 100644 --- a/src/protos.h +++ b/src/protos.h @@ -21,7 +21,11 @@ extern void aclDestroyAcls(acl **); extern void aclParseAccessLine(struct _acl_access **); extern void aclParseAclLine(acl **); extern struct _acl *aclFindByName(const char *name); +#if 0 extern char *aclGetDenyInfoUrl(struct _acl_deny_info_list **, const char *name); +#else +extern int aclGetDenyInfoPage(acl_deny_info_list ** head, const char *name); +#endif extern void aclParseDenyInfoLine(struct _acl_deny_info_list **); extern void aclDestroyDenyInfoList(struct _acl_deny_info_list **); extern void aclDestroyRegexList(struct _relist *data); @@ -262,19 +266,23 @@ extern void httpBodyPackInto(const HttpBody * body, Packer * p); extern void httpHdrCcInitModule(); extern HttpHdrCc *httpHdrCcCreate(); extern HttpHdrCc *httpHdrCcParseCreate(const char *str); -extern void httpHdrCcDestroy(HttpHdrCc * scc); -extern HttpHdrCc *httpHdrCcDup(HttpHdrCc * scc); -extern void httpHdrCcPackValueInto(HttpHdrCc * scc, Packer * p); -extern void httpHdrCcJoinWith(HttpHdrCc * scc, HttpHdrCc * new_scc); -extern void httpHdrCcUpdateStats(const HttpHdrCc * scc, StatHist * hist); +extern void httpHdrCcDestroy(HttpHdrCc * cc); +extern HttpHdrCc *httpHdrCcDup(const HttpHdrCc * cc); +extern void httpHdrCcPackValueInto(const HttpHdrCc * cc, Packer * p); +extern void httpHdrCcJoinWith(HttpHdrCc * cc, const HttpHdrCc * new_cc); +extern void httpHdrCcUpdateStats(const HttpHdrCc * cc, StatHist * hist); extern void httpHdrCcStatDumper(StoreEntry * sentry, int idx, double val, double size, int count); /* Http Range Header Field */ extern HttpHdrRange *httpHdrRangeParseCreate(const char *range_spec); /* returns true if ranges are valid; inits HttpHdrRange */ -extern int httpHdrRangeParseInit(HttpHdrRange * range, const char *range_spec); -extern void httpHdrRangeDestroy(HttpHdrRange * range); - +extern int httpHdrRangeParseInit(HttpHdrRange *range, const char *range_spec); +extern void httpHdrRangeDestroy(HttpHdrRange *range); +extern HttpHdrRange *httpHdrRangeDup(const HttpHdrRange * range); +extern void httpHdrRangePackValueInto(const HttpHdrRange * range, Packer * p); +extern void httpHdrRangeJoinWith(HttpHdrRange * range, const HttpHdrRange * new_range); +/* iterate through specs */ +extern int httpHdrRangeGetSpec(const HttpHdrRange *range, HttpHdrRangeSpec *spec, int *pos); /* Http Header Tools */ @@ -310,6 +318,7 @@ extern void httpHeaderAddExt(HttpHeader * hdr, const char *name, const char *val extern int httpHeaderGetInt(const HttpHeader * hdr, http_hdr_type id); extern time_t httpHeaderGetTime(const HttpHeader * hdr, http_hdr_type id); extern HttpHdrCc *httpHeaderGetCc(const HttpHeader * hdr); +extern HttpHdrRange *httpHeaderGetRange(const HttpHeader * hdr); extern const char *httpHeaderGetStr(const HttpHeader * hdr, http_hdr_type id); int httpHeaderDelFields(HttpHeader * hdr, const char *name); /* store report about current header usage and other stats */ @@ -533,23 +542,30 @@ void statHistDump(const StatHist * H, StoreEntry * sentry, StatHistBinDumper bd) void statHistLogInit(StatHist * H, int capacity, double min, double max); void statHistEnumInit(StatHist * H, int last_enum); +/* MemMeter */ +#define memMeterCheckHWater(m) { if ((m).hwater < (m).level) (m).hwater = (m).level; } +#define memMeterInc(m) { (m).level++; memMeterCheckHWater(m); } +#define 
memMeterDec(m) { (m).level--; memMeterCheckHWater(m); } +#define memMeterAdd(m, sz) { (m).level += (sz); memMeterCheckHWater(m); } +#define memMeterDel(m, sz) { (m).level -= (sz); memMeterCheckHWater(m); } +/* mem */ extern void memInit(void); extern void memClean(); extern void memInitModule(); extern void memCleanModule(); extern void memConfigure(); extern void *memAllocate(mem_type); +extern void *memAllocBuf(size_t net_size, size_t *gross_size); extern void memFree(mem_type, void *); +extern void memFreeBuf(size_t size, void *); extern void memFree4K(void *); extern void memFree8K(void *); extern void memFreeDISK(void *); extern int memInUse(mem_type); +extern size_t memTotalAllocated(); -extern DynPool *dynPoolCreate(); -extern void dynPoolDestroy(DynPool * pool); -extern void *dynPoolAlloc(DynPool * pool, size_t size); -extern void dynPoolFree(DynPool * pool, void *obj, size_t size); +/* MemPool */ extern MemPool *memPoolCreate(const char *label, size_t obj_size); extern void memPoolDestroy(MemPool * pool); extern void *memPoolAlloc(MemPool * pool); @@ -798,10 +814,11 @@ extern peer_t parseNeighborType(const char *s); extern HttpReply *errorBuildReply(ErrorState * err); extern void errorSend(int fd, ErrorState *); extern void errorAppendEntry(StoreEntry *, ErrorState *); -void errorStateFree(ErrorState * err); +extern void errorStateFree(ErrorState * err); extern void errorInitialize(void); +extern int errorReservePageId(const char *page_name); extern void errorFree(void); -extern ErrorState *errorCon(err_type, http_status); +extern ErrorState *errorCon(int type, http_status); extern void pconnPush(int, const char *host, u_short port); extern int pconnPop(const char *host, u_short port); @@ -819,6 +836,23 @@ extern double gb_to_double(const gb_t *); extern const char *gb_to_str(const gb_t *); extern void gb_flush(gb_t *); /* internal, do not use this */ +/* String */ +#define strLen(s) ((const int)(s).len) +#define strBuf(s) ((const char*)(s).buf) +#define strChr(s,ch) ((const char*)strchr(strBuf(s), (ch))) +#define strRChr(s,ch) ((const char*)strrchr(strBuf(s), (ch))) +#define strStr(s,str) ((const char*)strstr(strBuf(s), (str))) +#define strCmp(s,str) strcmp(strBuf(s), (str)) +#define strSet(s,ptr,ch) (s).buf[ptr-(s).buf] = (ch) +#define strCut(s,pos) (s).buf[pos] = '\0' +/* #define strCat(s,str) stringAppend(&(s), (str), strlen(str)+1) */ +extern void stringInit(String *s, const char *str); +extern String stringDup(const String *s); +extern void stringClean(String *s); +extern void stringReset(String *s, const char *str); +/* extern void stringAppend(String *s, const char *buf, size_t size); */ +/* extern void stringAppendf(String *s, const char *fmt, ...); */ + /* * prototypes for system functions missing from system includes */ diff --git a/src/squid.h b/src/squid.h index 2cd8d07a99..9b72606bbc 100644 --- a/src/squid.h +++ b/src/squid.h @@ -1,6 +1,6 @@ /* - * $Id: squid.h,v 1.161 1998/03/05 20:55:59 rousskov Exp $ + * $Id: squid.h,v 1.162 1998/03/07 23:43:10 rousskov Exp $ * * AUTHOR: Duane Wessels * @@ -31,6 +31,9 @@ #ifndef SQUID_H #define SQUID_H +/* @?@ @?@ tmp hack until coredumps are gone */ +#define OLD_POST_CODE 1 + #include "config.h" /* diff --git a/src/structs.h b/src/structs.h index be064fac8b..ff33f763c2 100644 --- a/src/structs.h +++ b/src/structs.h @@ -30,7 +30,8 @@ struct _acl_proxy_auth { }; struct _acl_deny_info_list { - char url[MAX_URL]; + int err_page_id; + char *err_page_name; acl_name_list *acl_list; acl_deny_info_list *next; }; @@ -45,6 +46,13 @@ struct 
_acl_arp_data { #endif +struct _String { + /* never reference these directly! */ + unsigned short int size; /* buffer size; 64K limit */ + unsigned short int len; /* current length */ + char *buf; +}; + #if SQUID_SNMP struct _snmpconf { char *line; @@ -978,7 +986,11 @@ struct _request_t { char login[MAX_LOGIN_SZ]; char host[SQUIDHOSTNAMELEN + 1]; u_short port; +#if 0 /* trying new interface */ char urlpath[MAX_URL]; +#else + String urlpath; +#endif int link_count; /* free when zero */ int flags; time_t max_age; @@ -1021,6 +1033,7 @@ struct _CommWriteStateData { struct _ErrorState { err_type type; + int page_id; http_status http_status; request_t *request; char *url; @@ -1122,6 +1135,28 @@ struct _storeSwapLogData { unsigned char key[MD5_DIGEST_CHARS]; }; +/* object to track per-action memory usage (e.g. #idle objects) */ +struct _MemMeter { + size_t level; /* current level (count or volume) */ + size_t hwater; /* high water mark */ +}; + +/* object to track per-pool memory usage (alloc = inuse+idle) */ +struct _MemPoolMeter { + MemMeter alloc; + MemMeter inuse; + MemMeter idle; + gb_t saved; +}; + +/* a pool is a [growing] space for objects of the same size */ +struct _MemPool { + const char *label; + size_t obj_size; + Stack pstack; /* stack for free pointers */ + MemPoolMeter meter; +}; + struct _ClientInfo { char *key; struct client_info *next; diff --git a/src/typedefs.h b/src/typedefs.h index 39aa5aa7a2..32298bb785 100644 --- a/src/typedefs.h +++ b/src/typedefs.h @@ -90,7 +90,6 @@ typedef struct _mem_node mem_node; typedef struct _mem_hdr mem_hdr; typedef struct _store_client store_client; typedef struct _MemObject MemObject; -typedef struct _MemPool MemPool; typedef struct _StoreEntry StoreEntry; typedef struct _SwapDir SwapDir; typedef struct _request_t request_t; @@ -106,6 +105,10 @@ typedef struct _tlv tlv; typedef struct _storeSwapLogData storeSwapLogData; typedef struct _cacheSwap cacheSwap; typedef struct _StatHist StatHist; +typedef struct _String String; +typedef struct _MemMeter MemMeter; +typedef struct _MemPoolMeter MemPoolMeter; +typedef struct _MemPool MemPool; typedef struct _ClientInfo ClientInfo; /* define AIOCB even without USE_ASYNC_IO */ @@ -157,3 +160,6 @@ typedef int Ctx; /* in case we want to change it later */ typedef size_t mb_size_t; + +/* iteration for HttpHdrRange */ +typedef int HttpHdrRangePos; diff --git a/src/url.cc b/src/url.cc index 600c54f4d3..c5b9cc24ee 100644 --- a/src/url.cc +++ b/src/url.cc @@ -1,6 +1,6 @@ /* - * $Id: url.cc,v 1.82 1998/03/03 00:31:17 rousskov Exp $ + * $Id: url.cc,v 1.83 1998/03/07 23:43:12 rousskov Exp $ * * DEBUG: section 23 URL Parsing * AUTHOR: Duane Wessels @@ -273,7 +273,11 @@ urlParse(method_t method, char *url) xstrncpy(request->host, host, SQUIDHOSTNAMELEN); xstrncpy(request->login, login, MAX_LOGIN_SZ); request->port = (u_short) port; +#if OLD_CODE xstrncpy(request->urlpath, urlpath, MAX_URL); +#else + stringReset(&request->urlpath, urlpath); +#endif request->max_age = -1; request->max_forwards = -1; return request; @@ -287,7 +291,11 @@ urnParse(method_t method, char *urn) request = memAllocate(MEM_REQUEST_T); request->method = method; request->protocol = PROTO_URN; +#if OLD_CODE xstrncpy(request->urlpath, &urn[4], MAX_URL); +#else + stringReset(&request->urlpath, urn+4); +#endif request->max_age = -1; request->max_forwards = -1; return request; @@ -301,7 +309,7 @@ urlCanonical(const request_t * request, char *buf) if (buf == NULL) buf = urlbuf; if (request->protocol == PROTO_URN) { - snprintf(buf, MAX_URL, "urn:%s", 
request->urlpath); + snprintf(buf, MAX_URL, "urn:%s", strBuf(request->urlpath)); } else switch (request->method) { case METHOD_CONNECT: @@ -317,7 +325,7 @@ urlCanonical(const request_t * request, char *buf) *request->login ? "@" : null_string, request->host, portbuf, - request->urlpath); + strBuf(request->urlpath)); break; } return buf; @@ -331,7 +339,7 @@ urlCanonicalClean(const request_t * request) LOCAL_ARRAY(char, loginbuf, MAX_LOGIN_SZ + 1); char *t; if (request->protocol == PROTO_URN) { - snprintf(buf, MAX_URL, "urn:%s", request->urlpath); + snprintf(buf, MAX_URL, "urn:%s", strBuf(request->urlpath)); } else switch (request->method) { case METHOD_CONNECT: @@ -353,7 +361,7 @@ urlCanonicalClean(const request_t * request) loginbuf, request->host, portbuf, - request->urlpath); + strBuf(request->urlpath)); if ((t = strchr(buf, '?'))) *t = '\0'; break; @@ -378,6 +386,7 @@ requestUnlink(request_t * request) return; safe_free(request->headers); safe_free(request->body); + stringClean(&request->urlpath); memFree(MEM_REQUEST_T, request); } diff --git a/src/urn.cc b/src/urn.cc index d18878010c..30e92deff4 100644 --- a/src/urn.cc +++ b/src/urn.cc @@ -63,6 +63,7 @@ urnFindMinRtt(wordlist * urls, method_t m, int *rtt_ret) if (rtt == 0) { debug(52, 3) ("Pinging %s\n", r->host); netdbPingSite(r->host); + stringClean(&r->urlpath); memFree(MEM_REQUEST_T, r); continue; } @@ -73,6 +74,7 @@ urnFindMinRtt(wordlist * urls, method_t m, int *rtt_ret) continue; min_rtt = rtt; min_w = w; + stringClean(&r->urlpath); memFree(MEM_REQUEST_T, r); } if (rtt_ret) @@ -89,7 +91,7 @@ urnStart(request_t * r, StoreEntry * e) LOCAL_ARRAY(char, urlres, 4096); request_t *urlres_r = NULL; const cache_key *k; - char *t; + const char *t; char *host; UrnState *urnState; StoreEntry *urlres_e; @@ -100,20 +102,26 @@ urnStart(request_t * r, StoreEntry * e) urnState->request = requestLink(r); cbdataAdd(urnState, MEM_NONE); storeLockObject(urnState->entry); - if (strncasecmp(r->urlpath, "menu.", 5) == 0) { + if (strncasecmp(strBuf(r->urlpath), "menu.", 5) == 0) { + char *new_path = xstrdup(strBuf(r->urlpath) + 5); EBIT_SET(urnState->flags, URN_FORCE_MENU); - t = xstrdup(r->urlpath + 5); +#if OLD_CODE + t = xstrdup(strBuf(r->urlpath) + 5); xstrncpy(r->urlpath, t, MAX_URL); xfree(t); +#else + stringReset(&r->urlpath, new_path); + xfree(new_path); +#endif } - if ((t = strchr(r->urlpath, ':')) != NULL) { - *t = '\0'; - host = xstrdup(r->urlpath); - *t = ':'; + if ((t = strChr(r->urlpath, ':')) != NULL) { + strSet(r->urlpath, t, '\0'); + host = xstrdup(strBuf(r->urlpath)); + strSet(r->urlpath, t, ':'); } else { - host = xstrdup(r->urlpath); + host = xstrdup(strBuf(r->urlpath)); } - snprintf(urlres, 4096, "http://%s/uri-res/N2L?urn:%s", host, r->urlpath); + snprintf(urlres, 4096, "http://%s/uri-res/N2L?urn:%s", host, strBuf(r->urlpath)); safe_free(host); k = storeKeyPublic(urlres, METHOD_GET); urlres_r = urlParse(METHOD_GET, urlres); @@ -147,7 +155,9 @@ urnStart(request_t * r, StoreEntry * e) static void urnHandleReply(void *data, char *buf, ssize_t size) { +#if 0 LOCAL_ARRAY(char, line, 4096); +#endif UrnState *urnState = data; StoreEntry *e = urnState->entry; StoreEntry *urlres_e = urnState->urlres_e; @@ -156,8 +166,12 @@ urnHandleReply(void *data, char *buf, ssize_t size) wordlist *w; wordlist *urls; wordlist *min_w; +#if 0 int l; String *S; +#else + MemBuf mb; +#endif ErrorState *err; double tmprtt; StoreEntry *tmpentry; @@ -216,46 +230,43 @@ urnHandleReply(void *data, char *buf, ssize_t size) } min_w = urnFindMinRtt(urls, 
urnState->request->method, NULL); storeBuffer(e); - S = stringCreate(1024); - l = snprintf(line, 4096, + memBufDefInit(&mb); + memBufPrintf(&mb, "Select URL for %s\n" "

Select URL for %s

\n" "\n", storeUrl(e), storeUrl(e)); - stringAppend(S, line, l); for (w = urls; w; w = w->next) { request_t *tmpr = urlParse(urnState->request->method, w->key); const cache_key *tmpk = storeKeyPublic(w->key, urnState->request->method); tmpentry = storeGet(tmpk); if (tmpr && tmpr->host && (tmprtt = netdbHostRtt(tmpr->host))) - l = snprintf(line, 4096, "\n", + memBufPrintf(&mb, "\n", w->key, w->key, tmprtt, tmpentry ? " [cached]" : " "); else - l = snprintf(line, 4096, "", w->key, w->key); - stringAppend(S, line, l); + memBufPrintf(&mb, "", w->key, w->key); } - l = snprintf(line, 4096, + memBufPrintf(&mb, "
%s%4.0f ms%s
%s%4.0f ms%s
%s
%s
" "
\n" "
\n" - "Generated by %s/%s@%s\n" + "Generated by %s@%s\n" "
\n", - appname, version_string, getMyHostname()); - stringAppend(S, line, l); + full_appname_string, getMyHostname()); rep = e->mem_obj->reply; httpReplyReset(rep); httpReplySetHeaders(rep, 1.0, HTTP_MOVED_TEMPORARILY, NULL, - "text/html", stringLength(S), 0, squid_curtime); + "text/html", mb.size, 0, squid_curtime); if (EBIT_TEST(urnState->flags, URN_FORCE_MENU)) { debug(51, 3) ("urnHandleReply: forcing menu\n"); } else if (min_w) { httpHeaderSetStr(&rep->hdr, HDR_LOCATION, min_w->key); } - httpBodySet(&rep->body, S->buf, stringLength(S) + 1, NULL); + httpBodySet(&rep->body, mb.buf, mb.size+1, memBufFreeFunc(&mb)); httpReplySwapOut(rep, e); storeComplete(e); memFree(MEM_4K_BUF, buf); wordlistDestroy(&urls); - stringFree(S); + /* mb was frozen with memBufFreeFunc call, so we must not clean it */ storeUnregister(urlres_e, urnState); storeUnlockObject(urlres_e); storeUnlockObject(urnState->entry);
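The new String interface can be tried in isolation. The struct layout and the strBuf/strLen/strRChr/strSet accessor macros below are copied from the structs.h and protos.h hunks above; the stringInit/stringReset/stringClean bodies are only stand-ins, since the real ones (in the new String.c, not shown in this patch) draw their buffers from the string pools rather than malloc(). The main() mirrors the ftpCheckUrlpath() hunk that strips a trailing ";type=x" from request->urlpath.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct _String {
    /* never reference these directly! */
    unsigned short int size;	/* buffer size; 64K limit */
    unsigned short int len;	/* current length */
    char *buf;
} String;

/* accessor macros as declared in protos.h */
#define strLen(s)        ((const int)(s).len)
#define strBuf(s)        ((const char *)(s).buf)
#define strRChr(s,ch)    ((const char *)strrchr(strBuf(s), (ch)))
#define strSet(s,ptr,ch) (s).buf[ptr-(s).buf] = (ch)

/* stand-in bodies: the real ones use the pooled memAllocBuf()/memFreeBuf() */
static void
stringInit(String *s, const char *str)
{
    s->len = strlen(str);
    s->size = s->len + 1;
    s->buf = malloc(s->size);
    memcpy(s->buf, str, s->size);
}

static void
stringClean(String *s)
{
    free(s->buf);
    s->buf = NULL;
    s->size = s->len = 0;
}

static void
stringReset(String *s, const char *str)
{
    if (s->buf)
	stringClean(s);
    stringInit(s, str);
}

int
main(void)
{
    String urlpath;
    const char *t;
    stringInit(&urlpath, "/pub/file.txt;type=i");
    /* same pattern as the ftpCheckUrlpath() hunk: cut a ";type=x" suffix.
     * strSet() writes straight into the buffer and does not adjust len. */
    if ((t = strRChr(urlpath, ';')) != NULL)
	if (strncmp(t + 1, "type=", 5) == 0)
	    strSet(urlpath, t, '\0');
    printf("path='%s'\n", strBuf(urlpath));
    /* a null path is replaced wholesale, as in the ftp.cc hunk */
    stringReset(&urlpath, "/");
    printf("path='%s' len=%d\n", strBuf(urlpath), strLen(urlpath));
    stringClean(&urlpath);
    return 0;
}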
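The MemMeter accounting added to structs.h and protos.h is likewise self-contained enough to exercise on its own. The struct and the macros below are copied from those hunks; only the small main() driver is new. Because memMeterCheckHWater() only ever raises hwater, a meter keeps its peak even after objects are released back to the pool.

#include <stdio.h>

/* object to track per-action memory usage (e.g. #idle objects) */
typedef struct _MemMeter {
    size_t level;	/* current level (count or volume) */
    size_t hwater;	/* high water mark */
} MemMeter;

#define memMeterCheckHWater(m) { if ((m).hwater < (m).level) (m).hwater = (m).level; }
#define memMeterInc(m) { (m).level++; memMeterCheckHWater(m); }
#define memMeterDec(m) { (m).level--; memMeterCheckHWater(m); }
#define memMeterAdd(m, sz) { (m).level += (sz); memMeterCheckHWater(m); }
#define memMeterDel(m, sz) { (m).level -= (sz); memMeterCheckHWater(m); }

int
main(void)
{
    MemMeter inuse = { 0, 0 };
    memMeterAdd(inuse, 3);	/* three objects taken from a pool */
    memMeterInc(inuse);		/* one more */
    memMeterDel(inuse, 2);	/* two released back */
    /* level drops to 2 but the high-water mark remembers the peak of 4 */
    printf("level=%u hwater=%u\n", (unsigned) inuse.level, (unsigned) inuse.hwater);
    return 0;
}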