delete map; // we just wanted to initialize shared memory segments
}
-MemStore::MemStore(): map(NULL)
+MemStore::MemStore(): map(NULL), cur_size(0)
{
}
const int limit = map->entryLimit();
storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
if (limit > 0) {
- const int entryCount = map->entryCount();
- storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
- entryCount, (100.0 * entryCount / limit));
+ // currentCount() returns uint64_t, so the unsigned 64-bit macro is required;
+ // PRId64 would be a signed/unsigned mismatch in the varargs call
+ storeAppendPrintf(&e, "Current entries: %"PRIu64" %.2f%%\n",
+ currentCount(), (100.0 * currentCount() / limit));
if (limit < 100) { // XXX: otherwise too expensive to count
Ipc::ReadWriteLockStats stats;
return 0; // XXX: make configurable
}
+/// current size of the mem-cached data in kiloBytes;
+/// cur_size is accumulated in bytes, the Store API reports KB
+uint64_t
+MemStore::currentSize() const
+{
+ return cur_size >> 10;
+}
+
+/// number of entries currently cached in shared memory (0 before the map exists)
+uint64_t
+MemStore::currentCount() const
+{
+ return map ? map->entryCount() : 0;
+}
+
void
MemStore::updateSize(int64_t eSize, int sign)
{
debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
" in " << page);
+ cur_size += eSize;
// remember storage location and size
extras.page = page;
extras.storedSize = copied;
MemStore::cleanReadable(const sfileno fileno)
{
+ // return the shared memory page to the pool before dropping size accounting
Ipc::Mem::PutPage(map->extras(fileno).page);
+ // NOTE(review): cur_size grows by eSize when an entry is cached but shrinks
+ // by extras.storedSize here -- confirm the two always match for cached entries
+ cur_size -= map->extras(fileno).storedSize;
}
/// calculates maximum number of entries we need to store and map
virtual void init();
virtual uint64_t maxSize() const;
virtual uint64_t minSize() const;
+ virtual uint64_t currentSize() const;
+ virtual uint64_t currentCount() const;
virtual void stat(StoreEntry &) const;
virtual StoreSearch *search(String const url, HttpRequest *);
virtual void reference(StoreEntry &);
private:
MemStoreMap *map; ///< index of mem-cached entries
+ uint64_t cur_size; ///< currently used space in the storage area
};
// Why use Store as a base? MemStore and SwapDir are both "caches".
/** The minimum size the store will shrink to via normal housekeeping */
virtual uint64_t minSize() const = 0;
+ /** current store size in kiloBytes */
+ virtual uint64_t currentSize() const = 0; // TODO: return size in bytes
+
+ /** the total number of objects stored */
+ virtual uint64_t currentCount() const = 0;
+
/**
* Output stats to the provided store entry.
\todo make these calls asynchronous
virtual uint64_t minSize() const;
+ virtual uint64_t currentSize() const;
+
+ virtual uint64_t currentCount() const;
+
virtual void stat(StoreEntry&) const;
virtual void reference(StoreEntry&);
#include "ConfigOption.h"
SwapDir::SwapDir(char const *aType): theType(aType),
- cur_size (0), max_size(0),
+ cur_size (0), max_size(0), n_disk_objects(0),
path(NULL), index(-1), min_objsize(0), max_objsize (-1),
repl(NULL), removals(0), scanned(0),
cleanLog(NULL)
void
SwapDir::stat(StoreEntry &output) const
{
+ if (!doReportStat())
+ return;
+
storeAppendPrintf(&output, "Store Directory #%d (%s): %s\n", index, type(),
path);
storeAppendPrintf(&output, "FS Block Size %d Bytes\n",
virtual uint64_t minSize() const;
+ virtual uint64_t currentSize() const;
+
+ virtual uint64_t currentCount() const;
+
virtual void stat(StoreEntry &) const;
virtual void sync(); /* Sync the store prior to shutdown */
virtual bool needsDiskStrand() const; ///< needs a dedicated kid process
virtual bool active() const; ///< may be used in this strand
+ /// whether stat should be reported by this SwapDir
+ virtual bool doReportStat() const { return active(); }
/* official Store interface functions */
virtual void diskFull();
virtual uint64_t maxSize() const { return max_size;}
virtual uint64_t minSize() const;
+
+ virtual uint64_t currentSize() const { return cur_size; }
+
+ virtual uint64_t currentCount() const { return n_disk_objects; }
+
virtual void stat (StoreEntry &anEntry) const;
virtual StoreSearch *search(String const url, HttpRequest *) = 0;
char const *theType;
public:
- uint64_t cur_size; ///< currently used space in the storage area
- uint64_t max_size; ///< maximum allocatable size of the storage area
+ // TODO: store cur_size and max_size in bytes
+ uint64_t cur_size; ///< currently used space in the storage area in kiloBytes
+ uint64_t max_size; ///< maximum allocatable size of the storage area in kiloBytes
+ uint64_t n_disk_objects; ///< total number of objects stored
char *path;
int index; /* This entry's index into the swapDirs array */
int64_t min_objsize;
/*
* Make sure we don't unlink the file, it might be
* in use by a subsequent entry. Also note that
- * we don't have to subtract from store_swap_size
- * because adding to store_swap_size happens in
- * the cleanup procedure.
+ * we don't have to subtract from cur_size because
+ * adding to cur_size happens in the cleanup procedure.
*/
e->expireNow();
e->releaseRequest();
continue;
}
- /* update store_swap_size */
rb->counts.objcount++;
e = storeCossAddDiskRestore(rb->sd, s.key,
e.swap_status = SWAPOUT_NONE;
}
+/// used disk space in kiloBytes, estimated from the entry count by
+/// assuming every stored entry occupies a full max_objsize slot
+uint64_t
+Rock::SwapDir::currentSize() const
+{
+ return (HeaderSize + max_objsize * currentCount()) >> 10;
+}
+
+/// number of entries in the on-disk map (0 until the map is created)
+uint64_t
+Rock::SwapDir::currentCount() const
+{
+ return map ? map->entryCount() : 0;
+}
+
+/// In SMP mode, only the disker process reports stats so that the same
+/// shared cache_dir is not counted by several kid processes at once.
+bool
+Rock::SwapDir::doReportStat() const
+{
+ return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
+}
+
// TODO: encapsulate as a tool; identical to CossSwapDir::create()
void
Rock::SwapDir::create()
debugs(47, 0, "WARNING: Rock store config wastes space.");
}
*/
-
- // XXX: misplaced, map is not yet created
- //cur_size = (HeaderSize + max_objsize * map->entryCount()) >> 10;
}
void
if (!map)
map = new DirMap(path);
- cur_size = (HeaderSize + max_objsize * map->entryCount()) >> 10;
-
// TODO: lower debugging level
debugs(47,1, "Rock cache_dir[" << index << "] limits: " <<
std::setw(12) << maximumSize() << " disk bytes and " <<
map->free(sio.swap_filen); // will mark as unusable, just in case
}
- // TODO: always compute cur_size based on map, do not store it
- cur_size = (HeaderSize + max_objsize * map->entryCount()) >> 10;
assert(sio.diskOffset + sio.offset_ <= diskOffsetLimit()); // post-factum
sio.finishedWriting(errflag);
void
Rock::SwapDir::updateSize(int64_t size, int sign)
{
- // it is not clear what store_swap_size really is; TODO: move low-level
- // size maintenance to individual store dir types
- cur_size = (HeaderSize + max_objsize * map->entryCount()) >> 10;
- store_swap_size = cur_size;
-
- if (sign > 0)
- ++n_disk_objects;
- else if (sign < 0)
- --n_disk_objects;
+ // stats are not stored but computed when needed;
+ // the size and sign parameters are intentionally unused here
}
// storeSwapOutFileClosed calls this nethod on DISK_NO_SPACE_LEFT,
if (StoreController::store_dirs_rebuilding)
return;
- debugs(47,3, HERE << "cache_dir[" << index << "] state: " <<
- map->full() << ' ' << currentSize() << " < " << diskOffsetLimit());
+ debugs(47,3, HERE << "cache_dir[" << index << "] state: " << map->full() <<
+ ' ' << (currentSize() << 10) << " < " << diskOffsetLimit());
// Hopefully, we find a removable entry much sooner (TODO: use time?)
const int maxProbed = 10000;
{
storeAppendPrintf(&e, "\n");
storeAppendPrintf(&e, "Maximum Size: %"PRIu64" KB\n", max_size);
- storeAppendPrintf(&e, "Current Size: %"PRIu64" KB %.2f%%\n", cur_size,
- 100.0 * cur_size / max_size);
+ storeAppendPrintf(&e, "Current Size: %"PRIu64" KB %.2f%%\n",
+ currentSize(), 100.0 * currentSize() / max_size);
if (map) {
const int limit = map->entryLimit();
virtual StoreSearch *search(String const url, HttpRequest *);
virtual StoreEntry *get(const cache_key *key);
virtual void disconnect(StoreEntry &e);
+ virtual uint64_t currentSize() const;
+ virtual uint64_t currentCount() const;
+ virtual bool doReportStat() const;
protected:
/* protected ::SwapDir API */
void ignoreReferences(StoreEntry &e); ///< delete from repl policy scope
// TODO: change cur_size and max_size type to stop this madness
- int64_t currentSize() const { return static_cast<int64_t>(cur_size) << 10;}
int64_t maximumSize() const { return static_cast<int64_t>(max_size) << 10;}
int64_t diskOffset(int filen) const;
int64_t diskOffsetLimit() const;
/*
* Make sure we don't unlink the file, it might be
* in use by a subsequent entry. Also note that
- * we don't have to subtract from store_swap_size
- * because adding to store_swap_size happens in
- * the cleanup procedure.
+ * we don't have to subtract from cur_size because
+ * adding to cur_size happens in the cleanup procedure.
*/
currentEntry()->expireNow();
currentEntry()->releaseRequest();
(void) 0;
}
- /* update store_swap_size */
counts.objcount++;
currentEntry(sd->addDiskRestore(swapData.key,
extern char *snmp_agentinfo;
#endif
- extern int n_disk_objects; /* 0 */
extern iostats IOStats;
extern struct acl_deny_info_list *DenyInfoList; /* NULL */
extern int starting_up; /* 1 */
extern int shutting_down; /* 0 */
extern int reconfiguring; /* 0 */
- extern unsigned long store_swap_size; /* 0 */
extern time_t hit_only_mode_until; /* 0 */
extern StatCounters statCounter;
extern double request_failure_ratio; /* 0.0 */
{
debugs(16, 5, HERE);
Must(entry != NULL);
- if (UsingSmp() && IamWorkerProcess())
+ if (UsingSmp())
storeAppendPrintf(entry, "by kid%d {\n", KidIdentifier);
handler(entry);
- if (atomic() && UsingSmp() && IamWorkerProcess())
+ if (atomic() && UsingSmp())
storeAppendPrintf(entry, "} by kid%d\n\n", KidIdentifier);
}
Must(entry != NULL);
#if XMALLOC_STATISTICS
- if (UsingSmp() && IamWorkerProcess())
+ if (UsingSmp())
storeAppendPrintf(entry, "by kid%d {\n", KidIdentifier);
DumpMallocStatistics(entry);
- if (UsingSmp() && IamWorkerProcess())
+ if (UsingSmp())
storeAppendPrintf(entry, "} by kid%d\n\n", KidIdentifier);
#endif
if (IamPrimaryProcess())
case SYSSTOR:
Answer = snmp_var_new_integer(Var->name, Var->name_length,
- store_swap_size,
+ Store::Root().currentSize(),
ASN_INTEGER);
break;
case PERF_PROTOSTAT_AGGR_CURSWAP:
Answer = snmp_var_new_integer(Var->name, Var->name_length,
- (snint) store_swap_size,
+ (snint) Store::Root().currentSize(),
SMI_GAUGE32);
break;
stats.request_hit_disk_ratio5 = statRequestHitDiskRatio(5);
stats.request_hit_disk_ratio60 = statRequestHitDiskRatio(60);
- stats.store_swap_size = store_swap_size;
+ stats.store_swap_size = Store::Root().currentSize();
stats.store_swap_max_size = Store::Root().maxSize();
stats.store_mem_size = mem_node::StoreMemSize();
stats.store_pages_max = store_pages_max;
stats.store_mem_used = mem_node::InUseCount();
- stats.objects_size = n_disk_objects ? (double) store_swap_size / n_disk_objects : 0.0;
+ stats.n_disk_objects = Store::Root().currentCount();
+ stats.objects_size = stats.n_disk_objects > 0 ?
+ (double)Store::Root().currentSize() / stats.n_disk_objects : 0.0;
stats.unlink_requests = statCounter.unlink.requests;
stats.store_entries = StoreEntry::inUseCount();
stats.store_mem_entries = MemObject::inUseCount();
stats.hot_obj_count = hot_obj_count;
- stats.n_disk_objects = n_disk_objects;
}
void
/* this should be emitted by the oversize dir, not globally */
- if (store_swap_size > Store::Root().maxSize()) {
+ if (Store::Root().currentSize() > Store::Root().maxSize()) {
if (squid_curtime - last_warn_time > 10) {
- debugs(20, 0, "WARNING: Disk space over limit: " << store_swap_size << " KB > "
+ debugs(20, 0, "WARNING: Disk space over limit: "
+ << Store::Root().currentSize() << " KB > "
<< Store::Root().maxSize() << " KB");
last_warn_time = squid_curtime;
}
* number of _entries_ we want to pre-allocate for.
*/
const int hi_cap = Store::Root().maxSize() / Config.Store.avgObjectSize;
- const int lo_cap = 1 + store_swap_size / Config.Store.avgObjectSize;
+ const int lo_cap = 1 + Store::Root().currentSize() / Config.Store.avgObjectSize;
const int e_count = StoreEntry::inUseCount();
int cap = e_count ? e_count :hi_cap;
debugs(71, 2, "storeDigestCalcCap: have: " << e_count << ", want " << cap <<
int64_t blks = (size + fs.blksize - 1) / fs.blksize;
int64_t k = ((blks * fs.blksize) >> 10) * sign;
cur_size += k;
- store_swap_size += k;
if (sign > 0)
n_disk_objects++;
storeAppendPrintf(&output, "Maximum Swap Size : %"PRIu64" KB\n",
maxSize());
- storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
- store_swap_size);
+ // currentSize() is uint64_t; the old %lu specifier matched unsigned long
+ // store_swap_size but would break varargs on targets where
+ // unsigned long != uint64_t, so switch to the PRIu64 macro
+ storeAppendPrintf(&output, "Current Store Swap Size: %8"PRIu64" KB\n",
+ currentSize());
storeAppendPrintf(&output, "Current Capacity : %"PRId64"%% used, %"PRId64"%% free\n",
- Math::int64Percent(store_swap_size, maxSize()),
- Math::int64Percent((maxSize() - store_swap_size), maxSize()));
+ Math::int64Percent(currentSize(), maxSize()),
+ Math::int64Percent((maxSize() - currentSize()), maxSize()));
if (memStore)
memStore->stat(output);
return swapDir->minSize();
}
+/// total used cache space in kiloBytes; delegates to the swap dir tree
+uint64_t
+StoreController::currentSize() const
+{
+ return swapDir->currentSize();
+}
+
+/// total number of stored on-disk objects; delegates to the swap dir tree
+uint64_t
+StoreController::currentCount() const
+{
+ return swapDir->currentCount();
+}
+
void
SwapDir::diskFull()
{
{
uint64_t result = 0;
- for (int i = 0; i < Config.cacheSwap.n_configured; i++)
- result += store(i)->maxSize();
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
+ if (dir(i).doReportStat())
+ result += store(i)->maxSize();
+ }
return result;
}
{
uint64_t result = 0;
- for (int i = 0; i < Config.cacheSwap.n_configured; i++)
- result += store(i)->minSize();
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
+ if (dir(i).doReportStat())
+ result += store(i)->minSize();
+ }
+
+ return result;
+}
+
+/// sum of currentSize() over all configured cache_dirs that report stats
+/// (doReportStat() keeps SMP kids from double-counting shared dirs)
+uint64_t
+StoreHashIndex::currentSize() const
+{
+ uint64_t result = 0;
+
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
+ if (dir(i).doReportStat())
+ result += store(i)->currentSize();
+ }
+
+ return result;
+}
+
+/// sum of currentCount() over all configured cache_dirs that report stats
+uint64_t
+StoreHashIndex::currentCount() const
+{
+ uint64_t result = 0;
+
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
+ if (dir(i).doReportStat())
+ result += store(i)->currentCount();
+ }
return result;
}
if (currentSearch->isDone()) {
debugs(20, 1, " Completed Validation Procedure");
debugs(20, 1, " Validated " << validated << " Entries");
- debugs(20, 1, " store_swap_size = " << store_swap_size);
+ debugs(20, 1, " store_swap_size = " << Store::Root().currentSize());
StoreController::store_dirs_rebuilding--;
assert(0 == StoreController::store_dirs_rebuilding);