/*
- * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
+ * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
/* LOCALS */
static const char *describeStatuses(const StoreEntry *);
-static const char *describeTimestamps(const StoreEntry *);
static void statAvgTick(void *notused);
static void statAvgDump(StoreEntry *, int minutes, int hours);
#if STAT_GRAPHS
static void statGraphDump(StoreEntry *);
#endif
-static void statCountersInit(StatCounters *);
-static void statCountersInitSpecial(StatCounters *);
-static void statCountersClean(StatCounters *);
-static void statCountersCopy(StatCounters * dest, const StatCounters * orig);
static double statPctileSvc(double, int, int);
static void statStoreEntry(MemBuf * mb, StoreEntry * e);
static double statCPUUsage(int minutes);
if (EBIT_TEST(flags, ENTRY_SPECIAL))
strcat(buf, "SPECIAL,");
- if (EBIT_TEST(flags, ENTRY_REVALIDATE))
- strcat(buf, "REVALIDATE,");
+ if (EBIT_TEST(flags, ENTRY_REVALIDATE_ALWAYS))
+ strcat(buf, "REVALIDATE_ALWAYS,");
if (EBIT_TEST(flags, DELAY_SENDING))
strcat(buf, "DELAY_SENDING,");
if (EBIT_TEST(flags, REFRESH_REQUEST))
strcat(buf, "REFRESH_REQUEST,");
+ if (EBIT_TEST(flags, ENTRY_REVALIDATE_STALE))
+ strcat(buf, "REVALIDATE_STALE,");
+
if (EBIT_TEST(flags, ENTRY_DISPATCHED))
strcat(buf, "DISPATCHED,");
return buf;
}
-static const char *
-describeTimestamps(const StoreEntry * entry)
-{
- LOCAL_ARRAY(char, buf, 256);
- snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
- (int) entry->timestamp,
- (int) entry->lastref,
- (int) entry->lastmod,
- (int) entry->expires);
- return buf;
-}
-
static void
statStoreEntry(MemBuf * mb, StoreEntry * e)
{
mb->appendf("KEY %s\n", e->getMD5Text());
mb->appendf("\t%s\n", describeStatuses(e));
mb->appendf("\t%s\n", storeEntryFlags(e));
- mb->appendf("\t%s\n", describeTimestamps(e));
+ mb->appendf("\t%s\n", e->describeTimestamps());
mb->appendf("\t%d locks, %d clients, %d refs\n", (int) e->locks(), storePendingNClients(e), (int) e->refcount);
mb->appendf("\tSwap Dir %d, File %#08X\n", e->swap_dirn, e->swap_filen);
stats.swap_files_cleaned = XAVG(swap.files_cleaned);
stats.aborted_requests = XAVG(aborted_requests);
+ stats.hitValidationAttempts = XAVG(hitValidation.attempts);
+ stats.hitValidationRefusalsDueToLocking = XAVG(hitValidation.refusalsDueToLocking);
+ stats.hitValidationRefusalsDueToZeroSize = XAVG(hitValidation.refusalsDueToZeroSize);
+ stats.hitValidationRefusalsDueToTimeLimit = XAVG(hitValidation.refusalsDueToTimeLimit);
+ stats.hitValidationFailures = XAVG(hitValidation.failures);
+
stats.syscalls_disk_opens = XAVG(syscalls.disk.opens);
stats.syscalls_disk_closes = XAVG(syscalls.disk.closes);
stats.syscalls_disk_reads = XAVG(syscalls.disk.reads);
storeAppendPrintf(sentry, "aborted_requests = %f/sec\n",
stats.aborted_requests);
+ storeAppendPrintf(sentry, "hit_validation.attempts = %f/sec\n",
+ stats.hitValidationAttempts);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_locking = %f/sec\n",
+ stats.hitValidationRefusalsDueToLocking);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_zeroSize = %f/sec\n",
+ stats.hitValidationRefusalsDueToZeroSize);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_timeLimit = %f/sec\n",
+ stats.hitValidationRefusalsDueToTimeLimit);
+ storeAppendPrintf(sentry, "hit_validation.failures = %f/sec\n",
+ stats.hitValidationFailures);
+
#if USE_POLL
storeAppendPrintf(sentry, "syscalls.polls = %f/sec\n", stats.syscalls_selects);
#elif defined(USE_SELECT) || defined(USE_SELECT_WIN32)
#endif
}
+/* add special cases here as they arrive */
+/* Set up the "special" (histogram) members of *C. Plain counters are
+ * value-initialized by statCountersInit(); histograms additionally need
+ * their capacity and scale fixed here via logInit()/enumInit(). */
+static void
+statCountersInitSpecial(StatCounters * C)
+{
+    /*
+     * HTTP svc_time hist is kept in milli-seconds; max of 3 hours.
+     */
+    C->client_http.allSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
+    C->client_http.missSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
+    C->client_http.nearMissSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
+    C->client_http.nearHitSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
+    C->client_http.hitSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
+    /*
+     * ICP svc_time hist is kept in micro-seconds; max of 1 minute.
+     */
+    C->icp.querySvcTime.logInit(300, 0.0, 1000000.0 * 60.0);
+    C->icp.replySvcTime.logInit(300, 0.0, 1000000.0 * 60.0);
+    /*
+     * DNS svc_time hist is kept in milli-seconds; max of 10 minutes.
+     */
+    C->dns.svcTime.logInit(300, 0.0, 60000.0 * 10.0);
+    /*
+     * Cache Digest Stuff
+     */
+    C->cd.on_xition_count.enumInit(CacheDigestHashFuncCount);
+    C->comm_udp_incoming.enumInit(INCOMING_UDP_MAX);
+    C->comm_dns_incoming.enumInit(INCOMING_DNS_MAX);
+    C->comm_tcp_incoming.enumInit(INCOMING_TCP_MAX);
+    C->select_fds_hist.enumInit(256); /* was SQUID_MAXFD, but that is far too much; it is OK to crop these statistics */
+}
+
+/* Reset *C: value-initialize every plain counter, then give the
+ * histogram members their required capacities/scales.
+ * NOTE(review): unlike the removed version, this no longer sets
+ * C->timestamp explicitly — presumably the StatCounters default
+ * constructor covers it; confirm against StatCounters' definition. */
+static void
+statCountersInit(StatCounters * C)
+{
+    assert(C);
+    *C = StatCounters();
+    statCountersInitSpecial(C);
+}
+
void
statInit(void)
{
static void
statAvgTick(void *)
{
- StatCounters *t = &CountHist[0];
- StatCounters *p = &CountHist[1];
- StatCounters *c = &statCounter;
-
struct rusage rusage;
eventAdd("statAvgTick", statAvgTick, NULL, (double) COUNT_INTERVAL, 1);
squid_getrusage(&rusage);
- c->page_faults = rusage_pagefaults(&rusage);
- c->cputime = rusage_cputime(&rusage);
- c->timestamp = current_time;
- /* even if NCountHist is small, we already Init()ed the tail */
- statCountersClean(CountHist + N_COUNT_HIST - 1);
- memmove(p, t, (N_COUNT_HIST - 1) * sizeof(StatCounters));
- statCountersCopy(t, c);
+ statCounter.page_faults = rusage_pagefaults(&rusage);
+ statCounter.cputime = rusage_cputime(&rusage);
+ statCounter.timestamp = current_time;
+ // shift all elements right and prepend statCounter
+ for(int i = N_COUNT_HIST-1; i > 0; --i)
+ CountHist[i] = CountHist[i-1];
+ CountHist[0] = statCounter;
++NCountHist;
if ((NCountHist % COUNT_INTERVAL) == 0) {
/* we have an hours worth of readings. store previous hour */
- StatCounters *t2 = &CountHourHist[0];
- StatCounters *p2 = &CountHourHist[1];
- StatCounters *c2 = &CountHist[N_COUNT_HIST - 1];
- statCountersClean(CountHourHist + N_COUNT_HOUR_HIST - 1);
- memmove(p2, t2, (N_COUNT_HOUR_HIST - 1) * sizeof(StatCounters));
- statCountersCopy(t2, c2);
+ // shift all elements right and prepend final CountHist element
+ for(int i = N_COUNT_HOUR_HIST-1; i > 0; --i)
+ CountHourHist[i] = CountHourHist[i-1];
+ CountHourHist[0] = CountHist[N_COUNT_HIST - 1];
++NCountHourHist;
}
i /= (int) dt;
if (Config.warnings.high_pf < i)
- debugs(18, DBG_CRITICAL, "WARNING: Page faults occuring at " << i << "/sec");
+ debugs(18, DBG_CRITICAL, "WARNING: Page faults occurring at " << i << "/sec");
}
}
}
}
-static void
-statCountersInit(StatCounters * C)
-{
- assert(C);
- memset(C, 0, sizeof(*C));
- C->timestamp = current_time;
- statCountersInitSpecial(C);
-}
-
-/* add special cases here as they arrive */
-static void
-statCountersInitSpecial(StatCounters * C)
-{
- /*
- * HTTP svc_time hist is kept in milli-seconds; max of 3 hours.
- */
- C->client_http.allSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
- C->client_http.missSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
- C->client_http.nearMissSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
- C->client_http.nearHitSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
- C->client_http.hitSvcTime.logInit(300, 0.0, 3600000.0 * 3.0);
- /*
- * ICP svc_time hist is kept in micro-seconds; max of 1 minute.
- */
- C->icp.querySvcTime.logInit(300, 0.0, 1000000.0 * 60.0);
- C->icp.replySvcTime.logInit(300, 0.0, 1000000.0 * 60.0);
- /*
- * DNS svc_time hist is kept in milli-seconds; max of 10 minutes.
- */
- C->dns.svcTime.logInit(300, 0.0, 60000.0 * 10.0);
- /*
- * Cache Digest Stuff
- */
- C->cd.on_xition_count.enumInit(CacheDigestHashFuncCount);
- C->comm_udp_incoming.enumInit(INCOMING_UDP_MAX);
- C->comm_dns_incoming.enumInit(INCOMING_DNS_MAX);
- C->comm_tcp_incoming.enumInit(INCOMING_TCP_MAX);
- C->select_fds_hist.enumInit(256); /* was SQUID_MAXFD, but it is way too much. It is OK to crop this statistics */
-}
-
-/* add special cases here as they arrive */
-static void
-statCountersClean(StatCounters * C)
-{
- assert(C);
- C->client_http.allSvcTime.clear();
- C->client_http.missSvcTime.clear();
- C->client_http.nearMissSvcTime.clear();
- C->client_http.nearHitSvcTime.clear();
- C->client_http.hitSvcTime.clear();
- C->icp.querySvcTime.clear();
- C->icp.replySvcTime.clear();
- C->dns.svcTime.clear();
- C->cd.on_xition_count.clear();
- C->comm_udp_incoming.clear();
- C->comm_dns_incoming.clear();
- C->comm_tcp_incoming.clear();
- C->select_fds_hist.clear();
-}
-
-/* add special cases here as they arrive */
-static void
-statCountersCopy(StatCounters * dest, const StatCounters * orig)
-{
- assert(dest && orig);
- /* this should take care of all the fields, but "special" ones */
- memcpy(dest, orig, sizeof(*dest));
- /* prepare space where to copy special entries */
- statCountersInitSpecial(dest);
- /* now handle special cases */
- /* note: we assert that histogram capacities do not change */
- dest->client_http.allSvcTime=orig->client_http.allSvcTime;
- dest->client_http.missSvcTime=orig->client_http.missSvcTime;
- dest->client_http.nearMissSvcTime=orig->client_http.nearMissSvcTime;
- dest->client_http.nearHitSvcTime=orig->client_http.nearHitSvcTime;
-
- dest->client_http.hitSvcTime=orig->client_http.hitSvcTime;
- dest->icp.querySvcTime=orig->icp.querySvcTime;
- dest->icp.replySvcTime=orig->icp.replySvcTime;
- dest->dns.svcTime=orig->dns.svcTime;
- dest->cd.on_xition_count=orig->cd.on_xition_count;
- dest->comm_udp_incoming=orig->comm_udp_incoming;
- dest->comm_dns_incoming=orig->comm_dns_incoming;
- dest->comm_tcp_incoming=orig->comm_tcp_incoming;
- dest->select_fds_hist=orig->select_fds_hist;
-}
-
static void
statCountersHistograms(StoreEntry * sentry)
{
stats.swap_ins = f->swap.ins;
stats.swap_files_cleaned = f->swap.files_cleaned;
stats.aborted_requests = f->aborted_requests;
+
+ stats.hitValidationAttempts = f->hitValidation.attempts;
+ stats.hitValidationRefusalsDueToLocking = f->hitValidation.refusalsDueToLocking;
+ stats.hitValidationRefusalsDueToZeroSize = f->hitValidation.refusalsDueToZeroSize;
+ stats.hitValidationRefusalsDueToTimeLimit = f->hitValidation.refusalsDueToTimeLimit;
+ stats.hitValidationFailures = f->hitValidation.failures;
}
void
stats.swap_files_cleaned);
storeAppendPrintf(sentry, "aborted_requests = %.0f\n",
stats.aborted_requests);
+
+ storeAppendPrintf(sentry, "hit_validation.attempts = %.0f\n",
+ stats.hitValidationAttempts);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_locking = %.0f\n",
+ stats.hitValidationRefusalsDueToLocking);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_zeroSize = %.0f\n",
+ stats.hitValidationRefusalsDueToZeroSize);
+ storeAppendPrintf(sentry, "hit_validation.refusals.due_to_timeLimit = %.0f\n",
+ stats.hitValidationRefusalsDueToTimeLimit);
+ storeAppendPrintf(sentry, "hit_validation.failures = %.0f\n",
+ stats.hitValidationFailures);
}
void
statFreeMemory(void)
{
-    int i;
+    // Drop all accumulated 5-minute and hourly history by overwriting each
+    // slot with a default-constructed StatCounters. NOTE(review): this
+    // assumes StatCounters/StatHist assignment releases the old entry's
+    // dynamically allocated histogram storage — confirm, since the former
+    // statCountersClean() freed histograms explicitly.
+    // TODO: replace with delete[]
+    for (int i = 0; i < N_COUNT_HIST; ++i)
+        CountHist[i] = StatCounters();
-    for (i = 0; i < N_COUNT_HIST; ++i)
-        statCountersClean(&CountHist[i]);
-
-    for (i = 0; i < N_COUNT_HOUR_HIST; ++i)
-        statCountersClean(&CountHourHist[i]);
+    for (int i = 0; i < N_COUNT_HOUR_HIST; ++i)
+        CountHourHist[i] = StatCounters();
}
static void
const int tot_used = f->cd.times_used + f->icp.times_used;
/* totals */
- cacheDigestGuessStatsReport(&f->cd.guess, sentry, "all peers");
+ static const SBuf label("all peers");
+ cacheDigestGuessStatsReport(&f->cd.guess, sentry, label);
/* per-peer */
storeAppendPrintf(sentry, "\nPer-peer statistics:\n");
GENGRAPH(client_http.kbytes_in.kb, "client_http.kbytes_in", "Client HTTP kbytes_in/sec");
GENGRAPH(client_http.kbytes_out.kb, "client_http.kbytes_out", "Client HTTP kbytes_out/sec");
- /* XXX todo: http median service times */
+ // TODO: http median service times
GENGRAPH(server.all.requests, "server.all.requests", "Server requests/sec");
GENGRAPH(server.all.errors, "server.all.errors", "Server errors/sec");
GENGRAPH(icp.kbytes_sent.kb, "icp.kbytes_sent", "ICP kbytes_sent/sec");
GENGRAPH(icp.kbytes_recv.kb, "icp.kbytes_recv", "ICP kbytes_received/sec");
- /* XXX todo: icp median service times */
- /* XXX todo: dns median service times */
+ // TODO: icp median service times
+ // TODO: dns median service times
GENGRAPH(unlink.requests, "unlink.requests", "Cache File unlink requests/sec");
GENGRAPH(page_faults, "page_faults", "System Page Faults/sec");