4 * DEBUG: section 18 Cache Manager Statistics
5 * AUTHOR: Harvest Derived
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
37 #include "StoreClient.h"
38 #include "AuthUserRequest.h"
39 #include "CacheManager.h"
41 #include "HttpRequest.h"
42 #include "MemObject.h"
48 #include "client_side_request.h"
49 #include "client_side.h"
51 #include "SquidTime.h"
53 /* these are included because they expose stats calls */
54 /* TODO: provide a self registration mechanism for those classes
55 * to use during static construction
58 #include "StoreSearch.h"
60 #define DEBUG_OPENFD 1
62 typedef int STOBJFLT(const StoreEntry
*);
64 class StatObjectsState
70 StoreSearchPointer theSearch
;
73 CBDATA_CLASS2(StatObjectsState
);
78 static const char *describeStatuses(const StoreEntry
*);
79 static const char *describeTimestamps(const StoreEntry
*);
80 static void statAvgTick(void *notused
);
81 static void statAvgDump(StoreEntry
*, int minutes
, int hours
);
83 static void statGraphDump(StoreEntry
*);
85 static void statCountersInit(StatCounters
*);
86 static void statCountersInitSpecial(StatCounters
*);
87 static void statCountersClean(StatCounters
*);
88 static void statCountersCopy(StatCounters
* dest
, const StatCounters
* orig
);
89 static double statPctileSvc(double, int, int);
90 static void statStoreEntry(MemBuf
* mb
, StoreEntry
* e
);
91 static double statCPUUsage(int minutes
);
92 static OBJH stat_io_get
;
93 static OBJH stat_objects_get
;
94 static OBJH stat_vmobjects_get
;
96 static OBJH statOpenfdObj
;
98 static EVH statObjects
;
100 static OBJH statCountersDump
;
101 static OBJH statPeerSelect
;
102 static OBJH statDigestBlob
;
103 static OBJH statAvg5min
;
104 static OBJH statAvg60min
;
105 static OBJH statUtilization
;
106 static OBJH statCountersHistograms
;
107 static OBJH statClientRequests
;
109 #ifdef XMALLOC_STATISTICS
110 static void info_get_mallstat(int, int, int, void *);
111 static double xm_time
;
112 static double xm_deltat
;
115 StatCounters CountHist
[N_COUNT_HIST
];
116 static int NCountHist
= 0;
117 static StatCounters CountHourHist
[N_COUNT_HOUR_HIST
];
118 static int NCountHourHist
= 0;
119 CBDATA_CLASS_INIT(StatObjectsState
);
121 extern unsigned int mem_pool_alloc_calls
;
122 extern unsigned int mem_pool_free_calls
;
125 statUtilization(StoreEntry
* e
)
127 storeAppendPrintf(e
, "Cache Utilisation:\n");
128 storeAppendPrintf(e
, "\n");
129 storeAppendPrintf(e
, "Last 5 minutes:\n");
132 statAvgDump(e
, 5, 0);
134 storeAppendPrintf(e
, "(no values recorded yet)\n");
136 storeAppendPrintf(e
, "\n");
138 storeAppendPrintf(e
, "Last 15 minutes:\n");
140 if (NCountHist
>= 15)
141 statAvgDump(e
, 15, 0);
143 storeAppendPrintf(e
, "(no values recorded yet)\n");
145 storeAppendPrintf(e
, "\n");
147 storeAppendPrintf(e
, "Last hour:\n");
149 if (NCountHist
>= 60)
150 statAvgDump(e
, 60, 0);
152 storeAppendPrintf(e
, "(no values recorded yet)\n");
154 storeAppendPrintf(e
, "\n");
156 storeAppendPrintf(e
, "Last 8 hours:\n");
158 if (NCountHourHist
>= 8)
159 statAvgDump(e
, 0, 8);
161 storeAppendPrintf(e
, "(no values recorded yet)\n");
163 storeAppendPrintf(e
, "\n");
165 storeAppendPrintf(e
, "Last day:\n");
167 if (NCountHourHist
>= 24)
168 statAvgDump(e
, 0, 24);
170 storeAppendPrintf(e
, "(no values recorded yet)\n");
172 storeAppendPrintf(e
, "\n");
174 storeAppendPrintf(e
, "Last 3 days:\n");
176 if (NCountHourHist
>= 72)
177 statAvgDump(e
, 0, 72);
179 storeAppendPrintf(e
, "(no values recorded yet)\n");
181 storeAppendPrintf(e
, "\n");
183 storeAppendPrintf(e
, "Totals since cache startup:\n");
189 stat_io_get(StoreEntry
* sentry
)
193 storeAppendPrintf(sentry
, "HTTP I/O\n");
194 storeAppendPrintf(sentry
, "number of reads: %d\n", IOStats
.Http
.reads
);
195 storeAppendPrintf(sentry
, "Read Histogram:\n");
197 for (i
= 0; i
< 16; i
++) {
198 storeAppendPrintf(sentry
, "%5d-%5d: %9d %2d%%\n",
199 i
? (1 << (i
- 1)) + 1 : 1,
201 IOStats
.Http
.read_hist
[i
],
202 percent(IOStats
.Http
.read_hist
[i
], IOStats
.Http
.reads
));
205 storeAppendPrintf(sentry
, "\n");
206 storeAppendPrintf(sentry
, "FTP I/O\n");
207 storeAppendPrintf(sentry
, "number of reads: %d\n", IOStats
.Ftp
.reads
);
208 storeAppendPrintf(sentry
, "Read Histogram:\n");
210 for (i
= 0; i
< 16; i
++) {
211 storeAppendPrintf(sentry
, "%5d-%5d: %9d %2d%%\n",
212 i
? (1 << (i
- 1)) + 1 : 1,
214 IOStats
.Ftp
.read_hist
[i
],
215 percent(IOStats
.Ftp
.read_hist
[i
], IOStats
.Ftp
.reads
));
218 storeAppendPrintf(sentry
, "\n");
219 storeAppendPrintf(sentry
, "Gopher I/O\n");
220 storeAppendPrintf(sentry
, "number of reads: %d\n", IOStats
.Gopher
.reads
);
221 storeAppendPrintf(sentry
, "Read Histogram:\n");
223 for (i
= 0; i
< 16; i
++) {
224 storeAppendPrintf(sentry
, "%5d-%5d: %9d %2d%%\n",
225 i
? (1 << (i
- 1)) + 1 : 1,
227 IOStats
.Gopher
.read_hist
[i
],
228 percent(IOStats
.Gopher
.read_hist
[i
], IOStats
.Gopher
.reads
));
231 storeAppendPrintf(sentry
, "\n");
235 describeStatuses(const StoreEntry
* entry
)
237 LOCAL_ARRAY(char, buf
, 256);
238 snprintf(buf
, 256, "%-13s %-13s %-12s %-12s",
239 storeStatusStr
[entry
->store_status
],
240 memStatusStr
[entry
->mem_status
],
241 swapStatusStr
[entry
->swap_status
],
242 pingStatusStr
[entry
->ping_status
]);
247 storeEntryFlags(const StoreEntry
* entry
)
249 LOCAL_ARRAY(char, buf
, 256);
250 int flags
= (int) entry
->flags
;
254 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
255 strcat(buf
, "SPECIAL,");
257 if (EBIT_TEST(flags
, ENTRY_REVALIDATE
))
258 strcat(buf
, "REVALIDATE,");
260 if (EBIT_TEST(flags
, DELAY_SENDING
))
261 strcat(buf
, "DELAY_SENDING,");
263 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
264 strcat(buf
, "RELEASE_REQUEST,");
266 if (EBIT_TEST(flags
, REFRESH_REQUEST
))
267 strcat(buf
, "REFRESH_REQUEST,");
269 if (EBIT_TEST(flags
, ENTRY_CACHABLE
))
270 strcat(buf
, "CACHABLE,");
272 if (EBIT_TEST(flags
, ENTRY_DISPATCHED
))
273 strcat(buf
, "DISPATCHED,");
275 if (EBIT_TEST(flags
, KEY_PRIVATE
))
276 strcat(buf
, "PRIVATE,");
278 if (EBIT_TEST(flags
, ENTRY_FWD_HDR_WAIT
))
279 strcat(buf
, "FWD_HDR_WAIT,");
281 if (EBIT_TEST(flags
, ENTRY_NEGCACHED
))
282 strcat(buf
, "NEGCACHED,");
284 if (EBIT_TEST(flags
, ENTRY_VALIDATED
))
285 strcat(buf
, "VALIDATED,");
287 if (EBIT_TEST(flags
, ENTRY_BAD_LENGTH
))
288 strcat(buf
, "BAD_LENGTH,");
290 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
291 strcat(buf
, "ABORTED,");
293 if ((t
= strrchr(buf
, ',')))
300 describeTimestamps(const StoreEntry
* entry
)
302 LOCAL_ARRAY(char, buf
, 256);
303 snprintf(buf
, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
304 (int) entry
->timestamp
,
305 (int) entry
->lastref
,
306 (int) entry
->lastmod
,
307 (int) entry
->expires
);
312 statStoreEntry(MemBuf
* mb
, StoreEntry
* e
)
314 MemObject
*mem
= e
->mem_obj
;
315 mb
->Printf("KEY %s\n", e
->getMD5Text());
316 mb
->Printf("\t%s\n", describeStatuses(e
));
317 mb
->Printf("\t%s\n", storeEntryFlags(e
));
318 mb
->Printf("\t%s\n", describeTimestamps(e
));
319 mb
->Printf("\t%d locks, %d clients, %d refs\n",
321 storePendingNClients(e
),
323 mb
->Printf("\tSwap Dir %d, File %#08X\n",
324 e
->swap_dirn
, e
->swap_filen
);
332 /* process objects list */
334 statObjects(void *data
)
336 StatObjectsState
*state
= static_cast<StatObjectsState
*>(data
);
339 if (state
->theSearch
->isDone()) {
340 state
->sentry
->complete();
341 state
->sentry
->unlock();
344 } else if (EBIT_TEST(state
->sentry
->flags
, ENTRY_ABORTED
)) {
345 state
->sentry
->unlock();
348 } else if (state
->sentry
->checkDeferRead(-1)) {
349 eventAdd("statObjects", statObjects
, state
, 0.1, 1);
353 state
->sentry
->buffer();
354 size_t statCount
= 0;
358 while (statCount
++ < static_cast<size_t>(Config
.Store
.objectsPerBucket
) && state
->
360 e
= state
->theSearch
->currentItem();
362 if (state
->filter
&& 0 == state
->filter(e
))
365 statStoreEntry(&mb
, e
);
369 state
->sentry
->append(mb
.buf
, mb
.size
);
372 eventAdd("statObjects", statObjects
, state
, 0.0, 1);
376 statObjectsStart(StoreEntry
* sentry
, STOBJFLT
* filter
)
378 StatObjectsState
*state
= new StatObjectsState
;
379 state
->sentry
= sentry
;
380 state
->filter
= filter
;
383 state
->theSearch
= Store::Root().search(NULL
, NULL
);
385 eventAdd("statObjects", statObjects
, state
, 0.0, 1);
389 stat_objects_get(StoreEntry
* sentry
)
391 statObjectsStart(sentry
, NULL
);
395 statObjectsVmFilter(const StoreEntry
* e
)
397 return e
->mem_obj
? 1 : 0;
401 stat_vmobjects_get(StoreEntry
* sentry
)
403 statObjectsStart(sentry
, statObjectsVmFilter
);
408 statObjectsOpenfdFilter(const StoreEntry
* e
)
410 if (e
->mem_obj
== NULL
)
413 if (e
->mem_obj
->swapout
.sio
== NULL
)
420 statOpenfdObj(StoreEntry
* sentry
)
422 statObjectsStart(sentry
, statObjectsOpenfdFilter
);
#ifdef XMALLOC_STATISTICS
/* malloc_statistics() callback: print one allocation-size row (count,
 * delta since last dump, allocations/sec over xm_deltat).
 * NOTE(review): the `if (number > 0)` guard was missing from the mangled
 * source; reconstructed from upstream -- confirm. */
static void
info_get_mallstat(int size, int number, int oldnum, void *data)
{
    StoreEntry *sentry = (StoreEntry *)data;

    if (number > 0)
        storeAppendPrintf(sentry, "%d\t %d\t %d\t %.1f\n", size, number, number - oldnum, xdiv((number - oldnum), xm_deltat));
}

#endif
440 info_get(StoreEntry
* sentry
)
443 struct rusage rusage
;
446 #if HAVE_MSTATS && HAVE_GNUMALLOC_H
449 #elif HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
455 runtime
= tvSubDsec(squid_start
, current_time
);
460 storeAppendPrintf(sentry
, "Squid Object Cache: Version %s\n",
465 if (WIN32_run_mode
== _WIN_SQUID_RUN_MODE_SERVICE
) {
466 storeAppendPrintf(sentry
,"\nRunning as %s Windows System Service on %s\n",
467 WIN32_Service_name
, WIN32_OS_string
);
468 storeAppendPrintf(sentry
,"Service command line is: %s\n", WIN32_Service_Command_Line
);
470 storeAppendPrintf(sentry
,"Running on %s\n",WIN32_OS_string
);
474 storeAppendPrintf(sentry
, "Start Time:\t%s\n",
475 mkrfc1123(squid_start
.tv_sec
));
477 storeAppendPrintf(sentry
, "Current Time:\t%s\n",
478 mkrfc1123(current_time
.tv_sec
));
480 storeAppendPrintf(sentry
, "Connection information for %s:\n",APP_SHORTNAME
);
482 storeAppendPrintf(sentry
, "\tNumber of clients accessing cache:\t%u\n",
483 statCounter
.client_http
.clients
);
485 storeAppendPrintf(sentry
, "\tNumber of HTTP requests received:\t%u\n",
486 statCounter
.client_http
.requests
);
488 storeAppendPrintf(sentry
, "\tNumber of ICP messages received:\t%u\n",
489 statCounter
.icp
.pkts_recv
);
491 storeAppendPrintf(sentry
, "\tNumber of ICP messages sent:\t%u\n",
492 statCounter
.icp
.pkts_sent
);
494 storeAppendPrintf(sentry
, "\tNumber of queued ICP replies:\t%u\n",
495 statCounter
.icp
.replies_queued
);
499 storeAppendPrintf(sentry
, "\tNumber of HTCP messages received:\t%u\n",
500 statCounter
.htcp
.pkts_recv
);
502 storeAppendPrintf(sentry
, "\tNumber of HTCP messages sent:\t%u\n",
503 statCounter
.htcp
.pkts_sent
);
507 storeAppendPrintf(sentry
, "\tRequest failure ratio:\t%5.2f\n",
508 request_failure_ratio
);
510 storeAppendPrintf(sentry
, "\tAverage HTTP requests per minute since start:\t%.1f\n",
511 statCounter
.client_http
.requests
/ (runtime
/ 60.0));
513 storeAppendPrintf(sentry
, "\tAverage ICP messages per minute since start:\t%.1f\n",
514 (statCounter
.icp
.pkts_sent
+ statCounter
.icp
.pkts_recv
) / (runtime
/ 60.0));
516 storeAppendPrintf(sentry
, "\tSelect loop called: %d times, %0.3f ms avg\n",
517 statCounter
.select_loops
, 1000.0 * runtime
/ statCounter
.select_loops
);
519 storeAppendPrintf(sentry
, "Cache information for %s:\n",APP_SHORTNAME
);
521 storeAppendPrintf(sentry
, "\tHits as %% of all requests:\t5min: %3.1f%%, 60min: %3.1f%%\n",
522 statRequestHitRatio(5),
523 statRequestHitRatio(60));
525 storeAppendPrintf(sentry
, "\tHits as %% of bytes sent:\t5min: %3.1f%%, 60min: %3.1f%%\n",
527 statByteHitRatio(60));
529 storeAppendPrintf(sentry
, "\tMemory hits as %% of hit requests:\t5min: %3.1f%%, 60min: %3.1f%%\n",
530 statRequestHitMemoryRatio(5),
531 statRequestHitMemoryRatio(60));
533 storeAppendPrintf(sentry
, "\tDisk hits as %% of hit requests:\t5min: %3.1f%%, 60min: %3.1f%%\n",
534 statRequestHitDiskRatio(5),
535 statRequestHitDiskRatio(60));
537 storeAppendPrintf(sentry
, "\tStorage Swap size:\t%lu KB\n",
540 storeAppendPrintf(sentry
, "\tStorage Swap capacity:\t%4.1f%% used, %4.1f%% free\n",
541 dpercent(store_swap_size
, Store::Root().maxSize()),
542 dpercent((Store::Root().maxSize() - store_swap_size
), Store::Root().maxSize()));
545 storeAppendPrintf(sentry
, "\tStorage Mem size:\t%lu KB\n",
546 (unsigned long)mem_node::StoreMemSize() >> 10);
548 storeAppendPrintf(sentry
, "\tStorage Mem capacity:\t%4.1f%% used, %4.1f%% free\n",
549 dpercent(mem_node::InUseCount(), store_pages_max
),
550 dpercent((store_pages_max
- mem_node::InUseCount()), store_pages_max
));
552 storeAppendPrintf(sentry
, "\tMean Object Size:\t%0.2f KB\n",
553 n_disk_objects
? (double) store_swap_size
/ n_disk_objects
: 0.0);
555 storeAppendPrintf(sentry
, "\tRequests given to unlinkd:\t%d\n",
556 statCounter
.unlink
.requests
);
558 storeAppendPrintf(sentry
, "Median Service Times (seconds) 5 min 60 min:\n");
560 storeAppendPrintf(sentry
, "\tHTTP Requests (All): %8.5f %8.5f\n",
561 statPctileSvc(0.5, 5, PCTILE_HTTP
) / 1000.0,
562 statPctileSvc(0.5, 60, PCTILE_HTTP
) / 1000.0);
564 storeAppendPrintf(sentry
, "\tCache Misses: %8.5f %8.5f\n",
565 statPctileSvc(0.5, 5, PCTILE_MISS
) / 1000.0,
566 statPctileSvc(0.5, 60, PCTILE_MISS
) / 1000.0);
568 storeAppendPrintf(sentry
, "\tCache Hits: %8.5f %8.5f\n",
569 statPctileSvc(0.5, 5, PCTILE_HIT
) / 1000.0,
570 statPctileSvc(0.5, 60, PCTILE_HIT
) / 1000.0);
572 storeAppendPrintf(sentry
, "\tNear Hits: %8.5f %8.5f\n",
573 statPctileSvc(0.5, 5, PCTILE_NH
) / 1000.0,
574 statPctileSvc(0.5, 60, PCTILE_NH
) / 1000.0);
576 storeAppendPrintf(sentry
, "\tNot-Modified Replies: %8.5f %8.5f\n",
577 statPctileSvc(0.5, 5, PCTILE_NM
) / 1000.0,
578 statPctileSvc(0.5, 60, PCTILE_NM
) / 1000.0);
580 storeAppendPrintf(sentry
, "\tDNS Lookups: %8.5f %8.5f\n",
581 statPctileSvc(0.5, 5, PCTILE_DNS
) / 1000.0,
582 statPctileSvc(0.5, 60, PCTILE_DNS
) / 1000.0);
584 storeAppendPrintf(sentry
, "\tICP Queries: %8.5f %8.5f\n",
585 statPctileSvc(0.5, 5, PCTILE_ICP_QUERY
) / 1000000.0,
586 statPctileSvc(0.5, 60, PCTILE_ICP_QUERY
) / 1000000.0);
588 squid_getrusage(&rusage
);
590 cputime
= rusage_cputime(&rusage
);
592 storeAppendPrintf(sentry
, "Resource usage for %s:\n", APP_SHORTNAME
);
594 storeAppendPrintf(sentry
, "\tUP Time:\t%.3f seconds\n", runtime
);
596 storeAppendPrintf(sentry
, "\tCPU Time:\t%.3f seconds\n", cputime
);
598 storeAppendPrintf(sentry
, "\tCPU Usage:\t%.2f%%\n",
599 dpercent(cputime
, runtime
));
601 storeAppendPrintf(sentry
, "\tCPU Usage, 5 minute avg:\t%.2f%%\n",
604 storeAppendPrintf(sentry
, "\tCPU Usage, 60 minute avg:\t%.2f%%\n",
609 storeAppendPrintf(sentry
, "\tProcess Data Segment Size via sbrk(): %lu KB\n",
610 (unsigned long) (((char *) sbrk(0) - (char *) sbrk_start
) >> 10));
614 storeAppendPrintf(sentry
, "\tMaximum Resident Size: %d KB\n",
615 rusage_maxrss(&rusage
));
617 storeAppendPrintf(sentry
, "\tPage faults with physical i/o: %d\n",
618 rusage_pagefaults(&rusage
));
620 #if HAVE_MSTATS && HAVE_GNUMALLOC_H
624 storeAppendPrintf(sentry
, "Memory usage for %s via mstats():\n",APP_SHORTNAME
);
626 storeAppendPrintf(sentry
, "\tTotal space in arena: %6d KB\n",
627 ms
.bytes_total
>> 10);
629 storeAppendPrintf(sentry
, "\tTotal free: %6d KB %d%%\n",
630 ms
.bytes_free
>> 10, percent(ms
.bytes_free
, ms
.bytes_total
));
632 #elif HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
636 storeAppendPrintf(sentry
, "Memory usage for %s via mallinfo():\n",APP_SHORTNAME
);
638 storeAppendPrintf(sentry
, "\tTotal space in arena: %6ld KB\n",
639 (long)mp
.arena
>> 10);
641 storeAppendPrintf(sentry
, "\tOrdinary blocks: %6ld KB %6ld blks\n",
642 (long)mp
.uordblks
>> 10, (long)mp
.ordblks
);
644 storeAppendPrintf(sentry
, "\tSmall blocks: %6ld KB %6ld blks\n",
645 (long)mp
.usmblks
>> 10, (long)mp
.smblks
);
647 storeAppendPrintf(sentry
, "\tHolding blocks: %6ld KB %6ld blks\n",
648 (long)mp
.hblkhd
>> 10, (long)mp
.hblks
);
650 storeAppendPrintf(sentry
, "\tFree Small blocks: %6ld KB\n",
651 (long)mp
.fsmblks
>> 10);
653 storeAppendPrintf(sentry
, "\tFree Ordinary blocks: %6ld KB\n",
654 (long)mp
.fordblks
>> 10);
656 t
= mp
.uordblks
+ mp
.usmblks
+ mp
.hblkhd
;
658 storeAppendPrintf(sentry
, "\tTotal in use: %6d KB %d%%\n",
659 t
>> 10, percent(t
, mp
.arena
+ mp
.hblkhd
));
661 t
= mp
.fsmblks
+ mp
.fordblks
;
663 storeAppendPrintf(sentry
, "\tTotal free: %6d KB %d%%\n",
664 t
>> 10, percent(t
, mp
.arena
+ mp
.hblkhd
));
666 t
= mp
.arena
+ mp
.hblkhd
;
668 storeAppendPrintf(sentry
, "\tTotal size: %6d KB\n",
671 #if HAVE_STRUCT_MALLINFO_MXFAST
673 storeAppendPrintf(sentry
, "\tmax size of small blocks:\t%d\n", mp
.mxfast
);
675 storeAppendPrintf(sentry
, "\tnumber of small blocks in a holding block:\t%d\n",
678 storeAppendPrintf(sentry
, "\tsmall block rounding factor:\t%d\n", mp
.grain
);
680 storeAppendPrintf(sentry
, "\tspace (including overhead) allocated in ord. blks:\t%d\n"
683 storeAppendPrintf(sentry
, "\tnumber of ordinary blocks allocated:\t%d\n",
686 storeAppendPrintf(sentry
, "\tbytes used in maintaining the free tree:\t%d\n",
689 #endif /* HAVE_STRUCT_MALLINFO_MXFAST */
690 #endif /* HAVE_MALLINFO */
692 storeAppendPrintf(sentry
, "Memory accounted for:\n");
694 #if !(HAVE_MSTATS && HAVE_GNUMALLOC_H) && HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
696 storeAppendPrintf(sentry
, "\tTotal accounted: %6d KB %3d%%\n",
697 statMemoryAccounted() >> 10, percent(statMemoryAccounted(), t
));
701 storeAppendPrintf(sentry
, "\tTotal accounted: %6d KB\n",
702 statMemoryAccounted() >> 10);
706 MemPoolGlobalStats mp_stats
;
707 memPoolGetGlobalStats(&mp_stats
);
708 #if !(HAVE_MSTATS && HAVE_GNUMALLOC_H) && HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
710 storeAppendPrintf(sentry
, "\tmemPool accounted: %6d KB %3d%%\n",
711 (int) mp_stats
.TheMeter
->alloc
.level
>> 10,
712 percent(mp_stats
.TheMeter
->alloc
.level
, t
));
713 storeAppendPrintf(sentry
, "\tmemPool unaccounted: %6d KB %3d%%\n",
714 (t
- (int) mp_stats
.TheMeter
->alloc
.level
) >> 10,
715 percent((t
- mp_stats
.TheMeter
->alloc
.level
), t
));
718 storeAppendPrintf(sentry
, "\tmemPoolAlloc calls: %9.0f\n",
719 mp_stats
.TheMeter
->gb_saved
.count
);
720 storeAppendPrintf(sentry
, "\tmemPoolFree calls: %9.0f\n",
721 mp_stats
.TheMeter
->gb_freed
.count
);
724 storeAppendPrintf(sentry
, "File descriptor usage for %s:\n", APP_SHORTNAME
);
725 storeAppendPrintf(sentry
, "\tMaximum number of file descriptors: %4d\n",
727 storeAppendPrintf(sentry
, "\tLargest file desc currently in use: %4d\n",
729 storeAppendPrintf(sentry
, "\tNumber of file desc currently in use: %4d\n",
731 storeAppendPrintf(sentry
, "\tFiles queued for open: %4d\n",
733 storeAppendPrintf(sentry
, "\tAvailable number of file descriptors: %4d\n",
735 storeAppendPrintf(sentry
, "\tReserved number of file descriptors: %4d\n",
737 storeAppendPrintf(sentry
, "\tStore Disk files open: %4d\n",
740 storeAppendPrintf(sentry
, "Internal Data Structures:\n");
741 storeAppendPrintf(sentry
, "\t%6lu StoreEntries\n",
742 (unsigned long)StoreEntry::inUseCount());
743 storeAppendPrintf(sentry
, "\t%6lu StoreEntries with MemObjects\n",
744 (unsigned long)MemObject::inUseCount());
745 storeAppendPrintf(sentry
, "\t%6d Hot Object Cache Items\n",
747 storeAppendPrintf(sentry
, "\t%6d on-disk objects\n",
750 #if XMALLOC_STATISTICS
752 xm_deltat
= current_dtime
- xm_time
;
753 xm_time
= current_dtime
;
754 storeAppendPrintf(sentry
, "\nMemory allocation statistics\n");
755 storeAppendPrintf(sentry
, "Allocation Size\t Alloc Count\t Alloc Delta\t Allocs/sec \n");
756 malloc_statistics(info_get_mallstat
, sentry
);
761 service_times(StoreEntry
* sentry
)
764 storeAppendPrintf(sentry
, "Service Time Percentiles 5 min 60 min:\n");
765 for (p
= 5; p
< 100; p
+= 5) {
766 storeAppendPrintf(sentry
, "\tHTTP Requests (All): %2d%% %8.5f %8.5f\n",
768 statPctileSvc((double) p
/ 100.0, 5, PCTILE_HTTP
) / 1000.0,
769 statPctileSvc((double) p
/ 100.0, 60, PCTILE_HTTP
) / 1000.0);
771 for (p
= 5; p
< 100; p
+= 5) {
772 storeAppendPrintf(sentry
, "\tCache Misses: %2d%% %8.5f %8.5f\n",
774 statPctileSvc((double) p
/ 100.0, 5, PCTILE_MISS
) / 1000.0,
775 statPctileSvc((double) p
/ 100.0, 60, PCTILE_MISS
) / 1000.0);
777 for (p
= 5; p
< 100; p
+= 5) {
778 storeAppendPrintf(sentry
, "\tCache Hits: %2d%% %8.5f %8.5f\n",
780 statPctileSvc((double) p
/ 100.0, 5, PCTILE_HIT
) / 1000.0,
781 statPctileSvc((double) p
/ 100.0, 60, PCTILE_HIT
) / 1000.0);
783 for (p
= 5; p
< 100; p
+= 5) {
784 storeAppendPrintf(sentry
, "\tNear Hits: %2d%% %8.5f %8.5f\n",
786 statPctileSvc((double) p
/ 100.0, 5, PCTILE_NH
) / 1000.0,
787 statPctileSvc((double) p
/ 100.0, 60, PCTILE_NH
) / 1000.0);
789 for (p
= 5; p
< 100; p
+= 5) {
790 storeAppendPrintf(sentry
, "\tNot-Modified Replies: %2d%% %8.5f %8.5f\n",
792 statPctileSvc((double) p
/ 100.0, 5, PCTILE_NM
) / 1000.0,
793 statPctileSvc((double) p
/ 100.0, 60, PCTILE_NM
) / 1000.0);
795 for (p
= 5; p
< 100; p
+= 5) {
796 storeAppendPrintf(sentry
, "\tDNS Lookups: %2d%% %8.5f %8.5f\n",
798 statPctileSvc((double) p
/ 100.0, 5, PCTILE_DNS
) / 1000.0,
799 statPctileSvc((double) p
/ 100.0, 60, PCTILE_DNS
) / 1000.0);
801 for (p
= 5; p
< 100; p
+= 5) {
802 storeAppendPrintf(sentry
, "\tICP Queries: %2d%% %8.5f %8.5f\n",
804 statPctileSvc((double) p
/ 100.0, 5, PCTILE_ICP_QUERY
) / 1000000.0,
805 statPctileSvc((double) p
/ 100.0, 60, PCTILE_ICP_QUERY
) / 1000000.0);
809 #define XAVG(X) (dt ? (double) (f->X - l->X) / dt : 0.0)
811 statAvgDump(StoreEntry
* sentry
, int minutes
, int hours
)
818 assert(N_COUNT_HIST
> 1);
819 assert(minutes
> 0 || hours
> 0);
823 if (minutes
> 0 && hours
== 0) {
824 /* checking minute readings ... */
826 if (minutes
> N_COUNT_HIST
- 1)
827 minutes
= N_COUNT_HIST
- 1;
829 l
= &CountHist
[minutes
];
830 } else if (minutes
== 0 && hours
> 0) {
831 /* checking hour readings ... */
833 if (hours
> N_COUNT_HOUR_HIST
- 1)
834 hours
= N_COUNT_HOUR_HIST
- 1;
836 l
= &CountHourHist
[hours
];
838 debugs(18, 1, "statAvgDump: Invalid args, minutes=" << minutes
<< ", hours=" << hours
);
842 dt
= tvSubDsec(l
->timestamp
, f
->timestamp
);
843 ct
= f
->cputime
- l
->cputime
;
845 storeAppendPrintf(sentry
, "sample_start_time = %d.%d (%s)\n",
846 (int) l
->timestamp
.tv_sec
,
847 (int) l
->timestamp
.tv_usec
,
848 mkrfc1123(l
->timestamp
.tv_sec
));
849 storeAppendPrintf(sentry
, "sample_end_time = %d.%d (%s)\n",
850 (int) f
->timestamp
.tv_sec
,
851 (int) f
->timestamp
.tv_usec
,
852 mkrfc1123(f
->timestamp
.tv_sec
));
854 storeAppendPrintf(sentry
, "client_http.requests = %f/sec\n",
855 XAVG(client_http
.requests
));
856 storeAppendPrintf(sentry
, "client_http.hits = %f/sec\n",
857 XAVG(client_http
.hits
));
858 storeAppendPrintf(sentry
, "client_http.errors = %f/sec\n",
859 XAVG(client_http
.errors
));
860 storeAppendPrintf(sentry
, "client_http.kbytes_in = %f/sec\n",
861 XAVG(client_http
.kbytes_in
.kb
));
862 storeAppendPrintf(sentry
, "client_http.kbytes_out = %f/sec\n",
863 XAVG(client_http
.kbytes_out
.kb
));
865 x
= statHistDeltaMedian(&l
->client_http
.all_svc_time
,
866 &f
->client_http
.all_svc_time
);
867 storeAppendPrintf(sentry
, "client_http.all_median_svc_time = %f seconds\n",
869 x
= statHistDeltaMedian(&l
->client_http
.miss_svc_time
,
870 &f
->client_http
.miss_svc_time
);
871 storeAppendPrintf(sentry
, "client_http.miss_median_svc_time = %f seconds\n",
873 x
= statHistDeltaMedian(&l
->client_http
.nm_svc_time
,
874 &f
->client_http
.nm_svc_time
);
875 storeAppendPrintf(sentry
, "client_http.nm_median_svc_time = %f seconds\n",
877 x
= statHistDeltaMedian(&l
->client_http
.nh_svc_time
,
878 &f
->client_http
.nh_svc_time
);
879 storeAppendPrintf(sentry
, "client_http.nh_median_svc_time = %f seconds\n",
881 x
= statHistDeltaMedian(&l
->client_http
.hit_svc_time
,
882 &f
->client_http
.hit_svc_time
);
883 storeAppendPrintf(sentry
, "client_http.hit_median_svc_time = %f seconds\n",
886 storeAppendPrintf(sentry
, "server.all.requests = %f/sec\n",
887 XAVG(server
.all
.requests
));
888 storeAppendPrintf(sentry
, "server.all.errors = %f/sec\n",
889 XAVG(server
.all
.errors
));
890 storeAppendPrintf(sentry
, "server.all.kbytes_in = %f/sec\n",
891 XAVG(server
.all
.kbytes_in
.kb
));
892 storeAppendPrintf(sentry
, "server.all.kbytes_out = %f/sec\n",
893 XAVG(server
.all
.kbytes_out
.kb
));
895 storeAppendPrintf(sentry
, "server.http.requests = %f/sec\n",
896 XAVG(server
.http
.requests
));
897 storeAppendPrintf(sentry
, "server.http.errors = %f/sec\n",
898 XAVG(server
.http
.errors
));
899 storeAppendPrintf(sentry
, "server.http.kbytes_in = %f/sec\n",
900 XAVG(server
.http
.kbytes_in
.kb
));
901 storeAppendPrintf(sentry
, "server.http.kbytes_out = %f/sec\n",
902 XAVG(server
.http
.kbytes_out
.kb
));
904 storeAppendPrintf(sentry
, "server.ftp.requests = %f/sec\n",
905 XAVG(server
.ftp
.requests
));
906 storeAppendPrintf(sentry
, "server.ftp.errors = %f/sec\n",
907 XAVG(server
.ftp
.errors
));
908 storeAppendPrintf(sentry
, "server.ftp.kbytes_in = %f/sec\n",
909 XAVG(server
.ftp
.kbytes_in
.kb
));
910 storeAppendPrintf(sentry
, "server.ftp.kbytes_out = %f/sec\n",
911 XAVG(server
.ftp
.kbytes_out
.kb
));
913 storeAppendPrintf(sentry
, "server.other.requests = %f/sec\n",
914 XAVG(server
.other
.requests
));
915 storeAppendPrintf(sentry
, "server.other.errors = %f/sec\n",
916 XAVG(server
.other
.errors
));
917 storeAppendPrintf(sentry
, "server.other.kbytes_in = %f/sec\n",
918 XAVG(server
.other
.kbytes_in
.kb
));
919 storeAppendPrintf(sentry
, "server.other.kbytes_out = %f/sec\n",
920 XAVG(server
.other
.kbytes_out
.kb
));
922 storeAppendPrintf(sentry
, "icp.pkts_sent = %f/sec\n",
923 XAVG(icp
.pkts_sent
));
924 storeAppendPrintf(sentry
, "icp.pkts_recv = %f/sec\n",
925 XAVG(icp
.pkts_recv
));
926 storeAppendPrintf(sentry
, "icp.queries_sent = %f/sec\n",
927 XAVG(icp
.queries_sent
));
928 storeAppendPrintf(sentry
, "icp.replies_sent = %f/sec\n",
929 XAVG(icp
.replies_sent
));
930 storeAppendPrintf(sentry
, "icp.queries_recv = %f/sec\n",
931 XAVG(icp
.queries_recv
));
932 storeAppendPrintf(sentry
, "icp.replies_recv = %f/sec\n",
933 XAVG(icp
.replies_recv
));
934 storeAppendPrintf(sentry
, "icp.replies_queued = %f/sec\n",
935 XAVG(icp
.replies_queued
));
936 storeAppendPrintf(sentry
, "icp.query_timeouts = %f/sec\n",
937 XAVG(icp
.query_timeouts
));
938 storeAppendPrintf(sentry
, "icp.kbytes_sent = %f/sec\n",
939 XAVG(icp
.kbytes_sent
.kb
));
940 storeAppendPrintf(sentry
, "icp.kbytes_recv = %f/sec\n",
941 XAVG(icp
.kbytes_recv
.kb
));
942 storeAppendPrintf(sentry
, "icp.q_kbytes_sent = %f/sec\n",
943 XAVG(icp
.q_kbytes_sent
.kb
));
944 storeAppendPrintf(sentry
, "icp.r_kbytes_sent = %f/sec\n",
945 XAVG(icp
.r_kbytes_sent
.kb
));
946 storeAppendPrintf(sentry
, "icp.q_kbytes_recv = %f/sec\n",
947 XAVG(icp
.q_kbytes_recv
.kb
));
948 storeAppendPrintf(sentry
, "icp.r_kbytes_recv = %f/sec\n",
949 XAVG(icp
.r_kbytes_recv
.kb
));
950 x
= statHistDeltaMedian(&l
->icp
.query_svc_time
, &f
->icp
.query_svc_time
);
951 storeAppendPrintf(sentry
, "icp.query_median_svc_time = %f seconds\n",
953 x
= statHistDeltaMedian(&l
->icp
.reply_svc_time
, &f
->icp
.reply_svc_time
);
954 storeAppendPrintf(sentry
, "icp.reply_median_svc_time = %f seconds\n",
956 x
= statHistDeltaMedian(&l
->dns
.svc_time
, &f
->dns
.svc_time
);
957 storeAppendPrintf(sentry
, "dns.median_svc_time = %f seconds\n",
959 storeAppendPrintf(sentry
, "unlink.requests = %f/sec\n",
960 XAVG(unlink
.requests
));
961 storeAppendPrintf(sentry
, "page_faults = %f/sec\n",
963 storeAppendPrintf(sentry
, "select_loops = %f/sec\n",
965 storeAppendPrintf(sentry
, "select_fds = %f/sec\n",
967 storeAppendPrintf(sentry
, "average_select_fd_period = %f/fd\n",
968 f
->select_fds
> l
->select_fds
?
969 (f
->select_time
- l
->select_time
) / (f
->select_fds
- l
->select_fds
)
971 x
= statHistDeltaMedian(&l
->select_fds_hist
, &f
->select_fds_hist
);
972 storeAppendPrintf(sentry
, "median_select_fds = %f\n", x
);
973 storeAppendPrintf(sentry
, "swap.outs = %f/sec\n",
975 storeAppendPrintf(sentry
, "swap.ins = %f/sec\n",
977 storeAppendPrintf(sentry
, "swap.files_cleaned = %f/sec\n",
978 XAVG(swap
.files_cleaned
));
979 storeAppendPrintf(sentry
, "aborted_requests = %f/sec\n",
980 XAVG(aborted_requests
));
983 storeAppendPrintf(sentry
, "syscalls.polls = %f/sec\n", XAVG(syscalls
.selects
));
984 #elif defined(USE_SELECT) || defined(USE_SELECT_WIN32)
985 storeAppendPrintf(sentry
, "syscalls.selects = %f/sec\n", XAVG(syscalls
.selects
));
988 storeAppendPrintf(sentry
, "syscalls.disk.opens = %f/sec\n", XAVG(syscalls
.disk
.opens
));
989 storeAppendPrintf(sentry
, "syscalls.disk.closes = %f/sec\n", XAVG(syscalls
.disk
.closes
));
990 storeAppendPrintf(sentry
, "syscalls.disk.reads = %f/sec\n", XAVG(syscalls
.disk
.reads
));
991 storeAppendPrintf(sentry
, "syscalls.disk.writes = %f/sec\n", XAVG(syscalls
.disk
.writes
));
992 storeAppendPrintf(sentry
, "syscalls.disk.seeks = %f/sec\n", XAVG(syscalls
.disk
.seeks
));
993 storeAppendPrintf(sentry
, "syscalls.disk.unlinks = %f/sec\n", XAVG(syscalls
.disk
.unlinks
));
994 storeAppendPrintf(sentry
, "syscalls.sock.accepts = %f/sec\n", XAVG(syscalls
.sock
.accepts
));
995 storeAppendPrintf(sentry
, "syscalls.sock.sockets = %f/sec\n", XAVG(syscalls
.sock
.sockets
));
996 storeAppendPrintf(sentry
, "syscalls.sock.connects = %f/sec\n", XAVG(syscalls
.sock
.connects
));
997 storeAppendPrintf(sentry
, "syscalls.sock.binds = %f/sec\n", XAVG(syscalls
.sock
.binds
));
998 storeAppendPrintf(sentry
, "syscalls.sock.closes = %f/sec\n", XAVG(syscalls
.sock
.closes
));
999 storeAppendPrintf(sentry
, "syscalls.sock.reads = %f/sec\n", XAVG(syscalls
.sock
.reads
));
1000 storeAppendPrintf(sentry
, "syscalls.sock.writes = %f/sec\n", XAVG(syscalls
.sock
.writes
));
1001 storeAppendPrintf(sentry
, "syscalls.sock.recvfroms = %f/sec\n", XAVG(syscalls
.sock
.recvfroms
));
1002 storeAppendPrintf(sentry
, "syscalls.sock.sendtos = %f/sec\n", XAVG(syscalls
.sock
.sendtos
));
1004 storeAppendPrintf(sentry
, "cpu_time = %f seconds\n", ct
);
1005 storeAppendPrintf(sentry
, "wall_time = %f seconds\n", dt
);
1006 storeAppendPrintf(sentry
, "cpu_usage = %f%%\n", dpercent(ct
, dt
));
1010 statRegisterWithCacheManager(void)
1012 CacheManager
*manager
= CacheManager::GetInstance();
1013 manager
->registerAction("info", "General Runtime Information",
1015 manager
->registerAction("service_times", "Service Times (Percentiles)",
1016 service_times
, 0, 1);
1017 manager
->registerAction("filedescriptors", "Process Filedescriptor Allocation",
1018 fde::DumpStats
, 0, 1);
1019 manager
->registerAction("objects", "All Cache Objects", stat_objects_get
, 0, 0);
1020 manager
->registerAction("vm_objects", "In-Memory and In-Transit Objects",
1021 stat_vmobjects_get
, 0, 0);
1022 manager
->registerAction("io", "Server-side network read() size histograms",
1024 manager
->registerAction("counters", "Traffic and Resource Counters",
1025 statCountersDump
, 0, 1);
1026 manager
->registerAction("peer_select", "Peer Selection Algorithms",
1027 statPeerSelect
, 0, 1);
1028 manager
->registerAction("digest_stats", "Cache Digest and ICP blob",
1029 statDigestBlob
, 0, 1);
1030 manager
->registerAction("5min", "5 Minute Average of Counters",
1032 manager
->registerAction("60min", "60 Minute Average of Counters",
1033 statAvg60min
, 0, 1);
1034 manager
->registerAction("utilization", "Cache Utilization",
1035 statUtilization
, 0, 1);
1036 manager
->registerAction("histograms", "Full Histogram Counts",
1037 statCountersHistograms
, 0, 1);
1038 manager
->registerAction("active_requests",
1039 "Client-side Active Requests",
1040 statClientRequests
, 0, 1);
1042 manager
->registerAction("openfd_objects", "Objects with Swapout files open",
1043 statOpenfdObj
, 0, 0);
1046 manager
->registerAction("graph_variables", "Display cache metrics graphically",
1047 statGraphDump
, 0, 1);
1056 debugs(18, 5, "statInit: Initializing...");
1058 for (i
= 0; i
< N_COUNT_HIST
; i
++)
1059 statCountersInit(&CountHist
[i
]);
1061 for (i
= 0; i
< N_COUNT_HOUR_HIST
; i
++)
1062 statCountersInit(&CountHourHist
[i
]);
1064 statCountersInit(&statCounter
);
1066 eventAdd("statAvgTick", statAvgTick
, NULL
, (double) COUNT_INTERVAL
, 1);
1068 ClientActiveRequests
.head
= NULL
;
1070 ClientActiveRequests
.tail
= NULL
;
1072 statRegisterWithCacheManager();
1076 statAvgTick(void *notused
)
1078 StatCounters
*t
= &CountHist
[0];
1079 StatCounters
*p
= &CountHist
[1];
1080 StatCounters
*c
= &statCounter
;
1082 struct rusage rusage
;
1083 eventAdd("statAvgTick", statAvgTick
, NULL
, (double) COUNT_INTERVAL
, 1);
1084 squid_getrusage(&rusage
);
1085 c
->page_faults
= rusage_pagefaults(&rusage
);
1086 c
->cputime
= rusage_cputime(&rusage
);
1087 c
->timestamp
= current_time
;
1088 /* even if NCountHist is small, we already Init()ed the tail */
1089 statCountersClean(CountHist
+ N_COUNT_HIST
- 1);
1090 xmemmove(p
, t
, (N_COUNT_HIST
- 1) * sizeof(StatCounters
));
1091 statCountersCopy(t
, c
);
1094 if ((NCountHist
% COUNT_INTERVAL
) == 0) {
1095 /* we have an hours worth of readings. store previous hour */
1096 StatCounters
*t
= &CountHourHist
[0];
1097 StatCounters
*p
= &CountHourHist
[1];
1098 StatCounters
*c
= &CountHist
[N_COUNT_HIST
- 1];
1099 statCountersClean(CountHourHist
+ N_COUNT_HOUR_HIST
- 1);
1100 xmemmove(p
, t
, (N_COUNT_HOUR_HIST
- 1) * sizeof(StatCounters
));
1101 statCountersCopy(t
, c
);
1105 if (Config
.warnings
.high_rptm
> 0) {
1106 int i
= (int) statPctileSvc(0.5, 20, PCTILE_HTTP
);
1108 if (Config
.warnings
.high_rptm
< i
)
1109 debugs(18, 0, "WARNING: Median response time is " << i
<< " milliseconds");
1112 if (Config
.warnings
.high_pf
) {
1113 int i
= (CountHist
[0].page_faults
- CountHist
[1].page_faults
);
1114 double dt
= tvSubDsec(CountHist
[0].timestamp
, CountHist
[1].timestamp
);
1116 if (i
> 0 && dt
> 0.0) {
1119 if (Config
.warnings
.high_pf
< i
)
1120 debugs(18, 0, "WARNING: Page faults occuring at " << i
<< "/sec");
1124 if (Config
.warnings
.high_memory
) {
1126 #if HAVE_MSTATS && HAVE_GNUMALLOC_H
1128 struct mstats ms
= mstats();
1130 #elif HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
1132 struct mallinfo mp
= mallinfo();
1136 i
= (size_t) ((char *) sbrk(0) - (char *) sbrk_start
);
1139 if (Config
.warnings
.high_memory
< i
)
1140 debugs(18, 0, "WARNING: Memory usage at " << ((unsigned long int)(i
>> 20)) << " MB");
1145 statCountersInit(StatCounters
* C
)
1148 memset(C
, 0, sizeof(*C
));
1149 C
->timestamp
= current_time
;
1150 statCountersInitSpecial(C
);
1153 /* add special cases here as they arrive */
1155 statCountersInitSpecial(StatCounters
* C
)
1158 * HTTP svc_time hist is kept in milli-seconds; max of 3 hours.
1160 statHistLogInit(&C
->client_http
.all_svc_time
, 300, 0.0, 3600000.0 * 3.0);
1161 statHistLogInit(&C
->client_http
.miss_svc_time
, 300, 0.0, 3600000.0 * 3.0);
1162 statHistLogInit(&C
->client_http
.nm_svc_time
, 300, 0.0, 3600000.0 * 3.0);
1163 statHistLogInit(&C
->client_http
.nh_svc_time
, 300, 0.0, 3600000.0 * 3.0);
1164 statHistLogInit(&C
->client_http
.hit_svc_time
, 300, 0.0, 3600000.0 * 3.0);
1166 * ICP svc_time hist is kept in micro-seconds; max of 1 minute.
1168 statHistLogInit(&C
->icp
.query_svc_time
, 300, 0.0, 1000000.0 * 60.0);
1169 statHistLogInit(&C
->icp
.reply_svc_time
, 300, 0.0, 1000000.0 * 60.0);
1171 * DNS svc_time hist is kept in milli-seconds; max of 10 minutes.
1173 statHistLogInit(&C
->dns
.svc_time
, 300, 0.0, 60000.0 * 10.0);
1175 * Cache Digest Stuff
1177 statHistEnumInit(&C
->cd
.on_xition_count
, CacheDigestHashFuncCount
);
1178 statHistEnumInit(&C
->comm_icp_incoming
, INCOMING_ICP_MAX
);
1179 statHistEnumInit(&C
->comm_dns_incoming
, INCOMING_DNS_MAX
);
1180 statHistEnumInit(&C
->comm_http_incoming
, INCOMING_HTTP_MAX
);
1181 statHistIntInit(&C
->select_fds_hist
, 256); /* was SQUID_MAXFD, but it is way too much. It is OK to crop this statistics */
1184 /* add special cases here as they arrive */
1186 statCountersClean(StatCounters
* C
)
1189 statHistClean(&C
->client_http
.all_svc_time
);
1190 statHistClean(&C
->client_http
.miss_svc_time
);
1191 statHistClean(&C
->client_http
.nm_svc_time
);
1192 statHistClean(&C
->client_http
.nh_svc_time
);
1193 statHistClean(&C
->client_http
.hit_svc_time
);
1194 statHistClean(&C
->icp
.query_svc_time
);
1195 statHistClean(&C
->icp
.reply_svc_time
);
1196 statHistClean(&C
->dns
.svc_time
);
1197 statHistClean(&C
->cd
.on_xition_count
);
1198 statHistClean(&C
->comm_icp_incoming
);
1199 statHistClean(&C
->comm_dns_incoming
);
1200 statHistClean(&C
->comm_http_incoming
);
1201 statHistClean(&C
->select_fds_hist
);
1204 /* add special cases here as they arrive */
1206 statCountersCopy(StatCounters
* dest
, const StatCounters
* orig
)
1208 assert(dest
&& orig
);
1209 /* this should take care of all the fields, but "special" ones */
1210 xmemcpy(dest
, orig
, sizeof(*dest
));
1211 /* prepare space where to copy special entries */
1212 statCountersInitSpecial(dest
);
1213 /* now handle special cases */
1214 /* note: we assert that histogram capacities do not change */
1215 statHistCopy(&dest
->client_http
.all_svc_time
, &orig
->client_http
.all_svc_time
);
1216 statHistCopy(&dest
->client_http
.miss_svc_time
, &orig
->client_http
.miss_svc_time
);
1217 statHistCopy(&dest
->client_http
.nm_svc_time
, &orig
->client_http
.nm_svc_time
);
1218 statHistCopy(&dest
->client_http
.nh_svc_time
, &orig
->client_http
.nh_svc_time
);
1219 statHistCopy(&dest
->client_http
.hit_svc_time
, &orig
->client_http
.hit_svc_time
);
1220 statHistCopy(&dest
->icp
.query_svc_time
, &orig
->icp
.query_svc_time
);
1221 statHistCopy(&dest
->icp
.reply_svc_time
, &orig
->icp
.reply_svc_time
);
1222 statHistCopy(&dest
->dns
.svc_time
, &orig
->dns
.svc_time
);
1223 statHistCopy(&dest
->cd
.on_xition_count
, &orig
->cd
.on_xition_count
);
1224 statHistCopy(&dest
->comm_icp_incoming
, &orig
->comm_icp_incoming
);
1225 statHistCopy(&dest
->comm_http_incoming
, &orig
->comm_http_incoming
);
1226 statHistCopy(&dest
->select_fds_hist
, &orig
->select_fds_hist
);
1230 statCountersHistograms(StoreEntry
* sentry
)
1232 StatCounters
*f
= &statCounter
;
1233 storeAppendPrintf(sentry
, "client_http.all_svc_time histogram:\n");
1234 statHistDump(&f
->client_http
.all_svc_time
, sentry
, NULL
);
1235 storeAppendPrintf(sentry
, "client_http.miss_svc_time histogram:\n");
1236 statHistDump(&f
->client_http
.miss_svc_time
, sentry
, NULL
);
1237 storeAppendPrintf(sentry
, "client_http.nm_svc_time histogram:\n");
1238 statHistDump(&f
->client_http
.nm_svc_time
, sentry
, NULL
);
1239 storeAppendPrintf(sentry
, "client_http.nh_svc_time histogram:\n");
1240 statHistDump(&f
->client_http
.nh_svc_time
, sentry
, NULL
);
1241 storeAppendPrintf(sentry
, "client_http.hit_svc_time histogram:\n");
1242 statHistDump(&f
->client_http
.hit_svc_time
, sentry
, NULL
);
1243 storeAppendPrintf(sentry
, "icp.query_svc_time histogram:\n");
1244 statHistDump(&f
->icp
.query_svc_time
, sentry
, NULL
);
1245 storeAppendPrintf(sentry
, "icp.reply_svc_time histogram:\n");
1246 statHistDump(&f
->icp
.reply_svc_time
, sentry
, NULL
);
1247 storeAppendPrintf(sentry
, "dns.svc_time histogram:\n");
1248 statHistDump(&f
->dns
.svc_time
, sentry
, NULL
);
1249 storeAppendPrintf(sentry
, "select_fds_hist histogram:\n");
1250 statHistDump(&f
->select_fds_hist
, sentry
, NULL
);
1254 statCountersDump(StoreEntry
* sentry
)
1256 StatCounters
*f
= &statCounter
;
1258 struct rusage rusage
;
1259 squid_getrusage(&rusage
);
1260 f
->page_faults
= rusage_pagefaults(&rusage
);
1261 f
->cputime
= rusage_cputime(&rusage
);
1263 storeAppendPrintf(sentry
, "sample_time = %d.%d (%s)\n",
1264 (int) f
->timestamp
.tv_sec
,
1265 (int) f
->timestamp
.tv_usec
,
1266 mkrfc1123(f
->timestamp
.tv_sec
));
1267 storeAppendPrintf(sentry
, "client_http.requests = %d\n",
1268 f
->client_http
.requests
);
1269 storeAppendPrintf(sentry
, "client_http.hits = %d\n",
1270 f
->client_http
.hits
);
1271 storeAppendPrintf(sentry
, "client_http.errors = %d\n",
1272 f
->client_http
.errors
);
1273 storeAppendPrintf(sentry
, "client_http.kbytes_in = %d\n",
1274 (int) f
->client_http
.kbytes_in
.kb
);
1275 storeAppendPrintf(sentry
, "client_http.kbytes_out = %d\n",
1276 (int) f
->client_http
.kbytes_out
.kb
);
1277 storeAppendPrintf(sentry
, "client_http.hit_kbytes_out = %d\n",
1278 (int) f
->client_http
.hit_kbytes_out
.kb
);
1280 storeAppendPrintf(sentry
, "server.all.requests = %d\n",
1281 (int) f
->server
.all
.requests
);
1282 storeAppendPrintf(sentry
, "server.all.errors = %d\n",
1283 (int) f
->server
.all
.errors
);
1284 storeAppendPrintf(sentry
, "server.all.kbytes_in = %d\n",
1285 (int) f
->server
.all
.kbytes_in
.kb
);
1286 storeAppendPrintf(sentry
, "server.all.kbytes_out = %d\n",
1287 (int) f
->server
.all
.kbytes_out
.kb
);
1289 storeAppendPrintf(sentry
, "server.http.requests = %d\n",
1290 (int) f
->server
.http
.requests
);
1291 storeAppendPrintf(sentry
, "server.http.errors = %d\n",
1292 (int) f
->server
.http
.errors
);
1293 storeAppendPrintf(sentry
, "server.http.kbytes_in = %d\n",
1294 (int) f
->server
.http
.kbytes_in
.kb
);
1295 storeAppendPrintf(sentry
, "server.http.kbytes_out = %d\n",
1296 (int) f
->server
.http
.kbytes_out
.kb
);
1298 storeAppendPrintf(sentry
, "server.ftp.requests = %d\n",
1299 (int) f
->server
.ftp
.requests
);
1300 storeAppendPrintf(sentry
, "server.ftp.errors = %d\n",
1301 (int) f
->server
.ftp
.errors
);
1302 storeAppendPrintf(sentry
, "server.ftp.kbytes_in = %d\n",
1303 (int) f
->server
.ftp
.kbytes_in
.kb
);
1304 storeAppendPrintf(sentry
, "server.ftp.kbytes_out = %d\n",
1305 (int) f
->server
.ftp
.kbytes_out
.kb
);
1307 storeAppendPrintf(sentry
, "server.other.requests = %d\n",
1308 (int) f
->server
.other
.requests
);
1309 storeAppendPrintf(sentry
, "server.other.errors = %d\n",
1310 (int) f
->server
.other
.errors
);
1311 storeAppendPrintf(sentry
, "server.other.kbytes_in = %d\n",
1312 (int) f
->server
.other
.kbytes_in
.kb
);
1313 storeAppendPrintf(sentry
, "server.other.kbytes_out = %d\n",
1314 (int) f
->server
.other
.kbytes_out
.kb
);
1316 storeAppendPrintf(sentry
, "icp.pkts_sent = %d\n",
1318 storeAppendPrintf(sentry
, "icp.pkts_recv = %d\n",
1320 storeAppendPrintf(sentry
, "icp.queries_sent = %d\n",
1321 f
->icp
.queries_sent
);
1322 storeAppendPrintf(sentry
, "icp.replies_sent = %d\n",
1323 f
->icp
.replies_sent
);
1324 storeAppendPrintf(sentry
, "icp.queries_recv = %d\n",
1325 f
->icp
.queries_recv
);
1326 storeAppendPrintf(sentry
, "icp.replies_recv = %d\n",
1327 f
->icp
.replies_recv
);
1328 storeAppendPrintf(sentry
, "icp.query_timeouts = %d\n",
1329 f
->icp
.query_timeouts
);
1330 storeAppendPrintf(sentry
, "icp.replies_queued = %d\n",
1331 f
->icp
.replies_queued
);
1332 storeAppendPrintf(sentry
, "icp.kbytes_sent = %d\n",
1333 (int) f
->icp
.kbytes_sent
.kb
);
1334 storeAppendPrintf(sentry
, "icp.kbytes_recv = %d\n",
1335 (int) f
->icp
.kbytes_recv
.kb
);
1336 storeAppendPrintf(sentry
, "icp.q_kbytes_sent = %d\n",
1337 (int) f
->icp
.q_kbytes_sent
.kb
);
1338 storeAppendPrintf(sentry
, "icp.r_kbytes_sent = %d\n",
1339 (int) f
->icp
.r_kbytes_sent
.kb
);
1340 storeAppendPrintf(sentry
, "icp.q_kbytes_recv = %d\n",
1341 (int) f
->icp
.q_kbytes_recv
.kb
);
1342 storeAppendPrintf(sentry
, "icp.r_kbytes_recv = %d\n",
1343 (int) f
->icp
.r_kbytes_recv
.kb
);
1345 #if USE_CACHE_DIGESTS
1347 storeAppendPrintf(sentry
, "icp.times_used = %d\n",
1349 storeAppendPrintf(sentry
, "cd.times_used = %d\n",
1351 storeAppendPrintf(sentry
, "cd.msgs_sent = %d\n",
1353 storeAppendPrintf(sentry
, "cd.msgs_recv = %d\n",
1355 storeAppendPrintf(sentry
, "cd.memory = %d\n",
1356 (int) f
->cd
.memory
.kb
);
1357 storeAppendPrintf(sentry
, "cd.local_memory = %d\n",
1358 (int) (store_digest
? store_digest
->mask_size
/ 1024 : 0));
1359 storeAppendPrintf(sentry
, "cd.kbytes_sent = %d\n",
1360 (int) f
->cd
.kbytes_sent
.kb
);
1361 storeAppendPrintf(sentry
, "cd.kbytes_recv = %d\n",
1362 (int) f
->cd
.kbytes_recv
.kb
);
1365 storeAppendPrintf(sentry
, "unlink.requests = %d\n",
1366 f
->unlink
.requests
);
1367 storeAppendPrintf(sentry
, "page_faults = %d\n",
1369 storeAppendPrintf(sentry
, "select_loops = %d\n",
1371 storeAppendPrintf(sentry
, "cpu_time = %f\n",
1373 storeAppendPrintf(sentry
, "wall_time = %f\n",
1374 tvSubDsec(f
->timestamp
, current_time
));
1375 storeAppendPrintf(sentry
, "swap.outs = %d\n",
1377 storeAppendPrintf(sentry
, "swap.ins = %d\n",
1379 storeAppendPrintf(sentry
, "swap.files_cleaned = %d\n",
1380 f
->swap
.files_cleaned
);
1381 storeAppendPrintf(sentry
, "aborted_requests = %d\n",
1382 f
->aborted_requests
);
1386 statFreeMemory(void)
1390 for (i
= 0; i
< N_COUNT_HIST
; i
++)
1391 statCountersClean(&CountHist
[i
]);
1393 for (i
= 0; i
< N_COUNT_HOUR_HIST
; i
++)
1394 statCountersClean(&CountHourHist
[i
]);
1398 statPeerSelect(StoreEntry
* sentry
)
1400 #if USE_CACHE_DIGESTS
1401 StatCounters
*f
= &statCounter
;
1403 const int tot_used
= f
->cd
.times_used
+ f
->icp
.times_used
;
1406 cacheDigestGuessStatsReport(&f
->cd
.guess
, sentry
, "all peers");
1408 storeAppendPrintf(sentry
, "\nPer-peer statistics:\n");
1410 for (peer
= getFirstPeer(); peer
; peer
= getNextPeer(peer
)) {
1412 peerDigestStatsReport(peer
->digest
, sentry
);
1414 storeAppendPrintf(sentry
, "\nNo peer digest from %s\n", peer
->host
);
1416 storeAppendPrintf(sentry
, "\n");
1419 storeAppendPrintf(sentry
, "\nAlgorithm usage:\n");
1420 storeAppendPrintf(sentry
, "Cache Digest: %7d (%3d%%)\n",
1421 f
->cd
.times_used
, xpercentInt(f
->cd
.times_used
, tot_used
));
1422 storeAppendPrintf(sentry
, "Icp: %7d (%3d%%)\n",
1423 f
->icp
.times_used
, xpercentInt(f
->icp
.times_used
, tot_used
));
1424 storeAppendPrintf(sentry
, "Total: %7d (%3d%%)\n",
1425 tot_used
, xpercentInt(tot_used
, tot_used
));
1428 storeAppendPrintf(sentry
, "peer digests are disabled; no stats is available.\n");
1433 statDigestBlob(StoreEntry
* sentry
)
1435 storeAppendPrintf(sentry
, "\nCounters:\n");
1436 statCountersDump(sentry
);
1437 storeAppendPrintf(sentry
, "\n5 Min Averages:\n");
1438 statAvgDump(sentry
, 5, 0);
1439 storeAppendPrintf(sentry
, "\nHistograms:\n");
1440 statCountersHistograms(sentry
);
1441 storeAppendPrintf(sentry
, "\nPeer Digests:\n");
1442 statPeerSelect(sentry
);
1443 storeAppendPrintf(sentry
, "\nLocal Digest:\n");
1444 storeDigestReport(sentry
);
1448 statAvg5min(StoreEntry
* e
)
1450 statAvgDump(e
, 5, 0);
1454 statAvg60min(StoreEntry
* e
)
1456 statAvgDump(e
, 60, 0);
1460 statPctileSvc(double pctile
, int interval
, int which
)
1465 assert(interval
> 0);
1467 if (interval
> N_COUNT_HIST
- 1)
1468 interval
= N_COUNT_HIST
- 1;
1472 l
= &CountHist
[interval
];
1481 x
= statHistDeltaPctile(&l
->client_http
.all_svc_time
, &f
->client_http
.all_svc_time
, pctile
);
1485 x
= statHistDeltaPctile(&l
->client_http
.hit_svc_time
, &f
->client_http
.hit_svc_time
, pctile
);
1489 x
= statHistDeltaPctile(&l
->client_http
.miss_svc_time
, &f
->client_http
.miss_svc_time
, pctile
);
1493 x
= statHistDeltaPctile(&l
->client_http
.nm_svc_time
, &f
->client_http
.nm_svc_time
, pctile
);
1497 x
= statHistDeltaPctile(&l
->client_http
.nh_svc_time
, &f
->client_http
.nh_svc_time
, pctile
);
1500 case PCTILE_ICP_QUERY
:
1501 x
= statHistDeltaPctile(&l
->icp
.query_svc_time
, &f
->icp
.query_svc_time
, pctile
);
1505 x
= statHistDeltaPctile(&l
->dns
.svc_time
, &f
->dns
.svc_time
, pctile
);
1509 debugs(49, 5, "statPctileSvc: unknown type.");
1517 snmpStatGet(int minutes
)
1519 return &CountHist
[minutes
];
1523 stat5minClientRequests(void)
1525 assert(N_COUNT_HIST
> 5);
1526 return statCounter
.client_http
.requests
- CountHist
[5].client_http
.requests
;
1530 statCPUUsage(int minutes
)
1532 assert(minutes
< N_COUNT_HIST
);
1533 return dpercent(CountHist
[0].cputime
- CountHist
[minutes
].cputime
,
1534 tvSubDsec(CountHist
[minutes
].timestamp
, CountHist
[0].timestamp
));
1538 statRequestHitRatio(int minutes
)
1540 assert(minutes
< N_COUNT_HIST
);
1541 return dpercent(CountHist
[0].client_http
.hits
-
1542 CountHist
[minutes
].client_http
.hits
,
1543 CountHist
[0].client_http
.requests
-
1544 CountHist
[minutes
].client_http
.requests
);
1548 statRequestHitMemoryRatio(int minutes
)
1550 assert(minutes
< N_COUNT_HIST
);
1551 return dpercent(CountHist
[0].client_http
.mem_hits
-
1552 CountHist
[minutes
].client_http
.mem_hits
,
1553 CountHist
[0].client_http
.hits
-
1554 CountHist
[minutes
].client_http
.hits
);
1558 statRequestHitDiskRatio(int minutes
)
1560 assert(minutes
< N_COUNT_HIST
);
1561 return dpercent(CountHist
[0].client_http
.disk_hits
-
1562 CountHist
[minutes
].client_http
.disk_hits
,
1563 CountHist
[0].client_http
.hits
-
1564 CountHist
[minutes
].client_http
.hits
);
1568 statByteHitRatio(int minutes
)
1572 #if USE_CACHE_DIGESTS
1576 /* size_t might be unsigned */
1577 assert(minutes
< N_COUNT_HIST
);
1578 c
= CountHist
[0].client_http
.kbytes_out
.kb
- CountHist
[minutes
].client_http
.kbytes_out
.kb
;
1579 s
= CountHist
[0].server
.all
.kbytes_in
.kb
- CountHist
[minutes
].server
.all
.kbytes_in
.kb
;
1580 #if USE_CACHE_DIGESTS
1582 * This ugly hack is here to prevent the user from seeing a
1583 * negative byte hit ratio. When we fetch a cache digest from
1584 * a neighbor, it gets treated like a cache miss because the
1585 * object is consumed internally. Thus, we subtract cache
1586 * digest bytes out before calculating the byte hit ratio.
1588 cd
= CountHist
[0].cd
.kbytes_recv
.kb
- CountHist
[minutes
].cd
.kbytes_recv
.kb
;
1591 debugs(18, 1, "STRANGE: srv_kbytes=" << s
<< ", cd_kbytes=" << cd
);
1598 return dpercent(c
- s
, c
);
1600 return (-1.0 * dpercent(s
- c
, c
));
1604 statClientRequests(StoreEntry
* s
)
1607 ClientHttpRequest
*http
;
1610 char buf
[MAX_IPSTRLEN
];
1612 for (i
= ClientActiveRequests
.head
; i
; i
= i
->next
) {
1613 const char *p
= NULL
;
1614 http
= static_cast<ClientHttpRequest
*>(i
->data
);
1616 ConnStateData
* conn
= http
->getConn();
1617 storeAppendPrintf(s
, "Connection: %p\n", conn
);
1621 storeAppendPrintf(s
, "\tFD %d, read %"PRId64
", wrote %"PRId64
"\n", fd
,
1622 fd_table
[fd
].bytes_read
, fd_table
[fd
].bytes_written
);
1623 storeAppendPrintf(s
, "\tFD desc: %s\n", fd_table
[fd
].desc
);
1624 storeAppendPrintf(s
, "\tin: buf %p, offset %ld, size %ld\n",
1625 conn
->in
.buf
, (long int) conn
->in
.notYetUsed
, (long int) conn
->in
.allocatedSize
);
1626 storeAppendPrintf(s
, "\tpeer: %s:%d\n",
1627 conn
->peer
.NtoA(buf
,MAX_IPSTRLEN
),
1628 conn
->peer
.GetPort());
1629 storeAppendPrintf(s
, "\tme: %s:%d\n",
1630 conn
->me
.NtoA(buf
,MAX_IPSTRLEN
),
1631 conn
->me
.GetPort());
1632 storeAppendPrintf(s
, "\tnrequests: %d\n",
1636 storeAppendPrintf(s
, "uri %s\n", http
->uri
);
1637 storeAppendPrintf(s
, "logType %s\n", log_tags
[http
->logType
]);
1638 storeAppendPrintf(s
, "out.offset %ld, out.size %lu\n",
1639 (long int) http
->out
.offset
, (unsigned long int) http
->out
.size
);
1640 storeAppendPrintf(s
, "req_sz %ld\n", (long int) http
->req_sz
);
1641 e
= http
->storeEntry();
1642 storeAppendPrintf(s
, "entry %p/%s\n", e
, e
? e
->getMD5Text() : "N/A");
1644 /* Not a member anymore */
1645 e
= http
->old_entry
;
1646 storeAppendPrintf(s
, "old_entry %p/%s\n", e
, e
? e
->getMD5Text() : "N/A");
1649 storeAppendPrintf(s
, "start %ld.%06d (%f seconds ago)\n",
1650 (long int) http
->start_time
.tv_sec
,
1651 (int) http
->start_time
.tv_usec
,
1652 tvSubDsec(http
->start_time
, current_time
));
1654 if (http
->request
->auth_user_request
)
1655 p
= http
->request
->auth_user_request
->username();
1656 else if (http
->request
->extacl_user
.buf() != NULL
) {
1657 p
= http
->request
->extacl_user
.buf();
1660 if (!p
&& (conn
!= NULL
&& conn
->rfc931
[0]))
1665 if (!p
&& conn
!= NULL
)
1666 p
= sslGetUserEmail(fd_table
[conn
->fd
].ssl
);
1673 storeAppendPrintf(s
, "username %s\n", p
);
1677 storeAppendPrintf(s
, "delay_pool %d\n", DelayId::DelayClient(http
).pool());
1681 storeAppendPrintf(s
, "\n");
1687 * urgh, i don't like these, but they do cut the amount of code down immensely
1690 #define GRAPH_PER_MIN(Y) \
1691 for (i=0;i<(N_COUNT_HIST-2);i++) { \
1692 dt = tvSubDsec(CountHist[i+1].timestamp, CountHist[i].timestamp); \
1695 storeAppendPrintf(e, "%lu,%0.2f:", \
1696 CountHist[i].timestamp.tv_sec, \
1697 ((CountHist[i].Y - CountHist[i+1].Y) / dt)); \
1700 #define GRAPH_PER_HOUR(Y) \
1701 for (i=0;i<(N_COUNT_HOUR_HIST-2);i++) { \
1702 dt = tvSubDsec(CountHourHist[i+1].timestamp, CountHourHist[i].timestamp); \
1705 storeAppendPrintf(e, "%lu,%0.2f:", \
1706 CountHourHist[i].timestamp.tv_sec, \
1707 ((CountHourHist[i].Y - CountHourHist[i+1].Y) / dt)); \
1710 #define GRAPH_TITLE(X,Y) storeAppendPrintf(e,"%s\t%s\t",X,Y);
1711 #define GRAPH_END storeAppendPrintf(e,"\n");
1713 #define GENGRAPH(X,Y,Z) \
1720 statGraphDump(StoreEntry
* e
)
1725 GENGRAPH(client_http
.requests
, "client_http.requests", "Client HTTP requests/sec");
1726 GENGRAPH(client_http
.hits
, "client_http.hits", "Client HTTP hits/sec");
1727 GENGRAPH(client_http
.errors
, "client_http.errors", "Client HTTP errors/sec");
1728 GENGRAPH(client_http
.kbytes_in
.kb
, "client_http.kbytes_in", "Client HTTP kbytes_in/sec");
1729 GENGRAPH(client_http
.kbytes_out
.kb
, "client_http.kbytes_out", "Client HTTP kbytes_out/sec");
1731 /* XXX todo: http median service times */
1733 GENGRAPH(server
.all
.requests
, "server.all.requests", "Server requests/sec");
1734 GENGRAPH(server
.all
.errors
, "server.all.errors", "Server errors/sec");
1735 GENGRAPH(server
.all
.kbytes_in
.kb
, "server.all.kbytes_in", "Server total kbytes_in/sec");
1736 GENGRAPH(server
.all
.kbytes_out
.kb
, "server.all.kbytes_out", "Server total kbytes_out/sec");
1738 GENGRAPH(server
.http
.requests
, "server.http.requests", "Server HTTP requests/sec");
1739 GENGRAPH(server
.http
.errors
, "server.http.errors", "Server HTTP errors/sec");
1740 GENGRAPH(server
.http
.kbytes_in
.kb
, "server.http.kbytes_in", "Server HTTP kbytes_in/sec");
1741 GENGRAPH(server
.http
.kbytes_out
.kb
, "server.http.kbytes_out", "Server HTTP kbytes_out/sec");
1743 GENGRAPH(server
.ftp
.requests
, "server.ftp.requests", "Server FTP requests/sec");
1744 GENGRAPH(server
.ftp
.errors
, "server.ftp.errors", "Server FTP errors/sec");
1745 GENGRAPH(server
.ftp
.kbytes_in
.kb
, "server.ftp.kbytes_in", "Server FTP kbytes_in/sec");
1746 GENGRAPH(server
.ftp
.kbytes_out
.kb
, "server.ftp.kbytes_out", "Server FTP kbytes_out/sec");
1748 GENGRAPH(server
.other
.requests
, "server.other.requests", "Server other requests/sec");
1749 GENGRAPH(server
.other
.errors
, "server.other.errors", "Server other errors/sec");
1750 GENGRAPH(server
.other
.kbytes_in
.kb
, "server.other.kbytes_in", "Server other kbytes_in/sec");
1751 GENGRAPH(server
.other
.kbytes_out
.kb
, "server.other.kbytes_out", "Server other kbytes_out/sec");
1753 GENGRAPH(icp
.pkts_sent
, "icp.pkts_sent", "ICP packets sent/sec");
1754 GENGRAPH(icp
.pkts_recv
, "icp.pkts_recv", "ICP packets received/sec");
1755 GENGRAPH(icp
.kbytes_sent
.kb
, "icp.kbytes_sent", "ICP kbytes_sent/sec");
1756 GENGRAPH(icp
.kbytes_recv
.kb
, "icp.kbytes_recv", "ICP kbytes_received/sec");
1758 /* XXX todo: icp median service times */
1759 /* XXX todo: dns median service times */
1761 GENGRAPH(unlink
.requests
, "unlink.requests", "Cache File unlink requests/sec");
1762 GENGRAPH(page_faults
, "page_faults", "System Page Faults/sec");
1763 GENGRAPH(select_loops
, "select_loops", "System Select Loop calls/sec");
1764 GENGRAPH(cputime
, "cputime", "CPU utilisation");
1767 #endif /* STAT_GRAPHS */
1770 statMemoryAccounted(void)
1772 return memPoolsTotalAllocated();