From: Alex Rousskov
Date: Wed, 5 Dec 2012 19:22:51 +0000 (-0700)
Subject: Use newly added slot-size cache_dir parameter instead of abusing max-size.
X-Git-Tag: SQUID_3_5_0_1~444^2~95
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e51ce7da04cc204b17968007ced10f0fbee6af1f;p=thirdparty%2Fsquid.git

Use newly added slot-size cache_dir parameter instead of abusing max-size.

Use unique suffixes for inode and free slot index names.
---

diff --git a/src/cf.data.pre b/src/cf.data.pre
index 3fe491d51b..e4b28735b5 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -3202,13 +3202,11 @@ DOC_START
 
         The rock store type:
 
-        cache_dir rock Directory-Name Mbytes <max-size=bytes> [options]
+        cache_dir rock Directory-Name Mbytes [options]
 
         The Rock Store type is a database-style storage. All cached
-        entries are stored in a "database" file, using fixed-size slots,
-        one entry per slot. The database size is specified in MB. The
-        slot size is specified in bytes using the max-size option. See
-        below for more info on the max-size option.
+        entries are stored in a "database" file, using fixed-size slots.
+        A single entry occupies one or more slots.
 
         swap-timeout=msec: Squid will not start writing a miss to or
         reading a hit from disk if it estimates that the swap operation
@@ -3233,6 +3231,16 @@ DOC_START
         and when set to zero, disables the disk I/O rate limit
         enforcement. Currently supported by IpcIo module only.
 
+        slot-size=bytes: The size of a database "record" used for
+        storing cached responses. A cached response occupies at least
+        one slot and all database I/O is done using individual slots so
+        increasing this parameter leads to more disk space waste while
+        decreasing it leads to more disk I/O overheads. Should be a
+        multiple of your operating system I/O page size. Defaults to
+        16KBytes. A housekeeping header is stored with each slot and
+        smaller slot-sizes will be rejected. The header is smaller than
+        100 bytes.
+
         The coss store type:
 
diff --git a/src/fs/rock/RockIoState.cc b/src/fs/rock/RockIoState.cc
index 2b36d0f44a..9c2612eb4e 100644
--- a/src/fs/rock/RockIoState.cc
+++ b/src/fs/rock/RockIoState.cc
@@ -20,7 +20,7 @@ Rock::IoState::IoState(SwapDir &aDir,
                        void *data):
         dbSlot(NULL),
         dir(aDir),
-        slotSize(dir.max_objsize),
+        slotSize(dir.slotSize),
         objOffset(0)
 {
     e = anEntry;
diff --git a/src/fs/rock/RockRebuild.cc b/src/fs/rock/RockRebuild.cc
index 845d90662d..3705ad981e 100644
--- a/src/fs/rock/RockRebuild.cc
+++ b/src/fs/rock/RockRebuild.cc
@@ -34,7 +34,7 @@ Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
     assert(sd);
     memset(&counts, 0, sizeof(counts));
     dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
-    dbEntrySize = sd->max_objsize;
+    dbEntrySize = sd->slotSize;
     dbEntryLimit = sd->entryLimit();
     loaded.reserve(dbSize);
     for (size_t i = 0; i < loaded.size(); ++i)
diff --git a/src/fs/rock/RockSwapDir.cc b/src/fs/rock/RockSwapDir.cc
index e82898a52b..7f06c2fdf1 100644
--- a/src/fs/rock/RockSwapDir.cc
+++ b/src/fs/rock/RockSwapDir.cc
@@ -30,7 +30,8 @@
 
 const int64_t Rock::SwapDir::HeaderSize = 16*1024;
 
-Rock::SwapDir::SwapDir(): ::SwapDir("rock"), filePath(NULL), io(NULL), map(NULL)
+Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
+    slotSize(HeaderSize), filePath(NULL), io(NULL), map(NULL), dbSlots(NULL)
 {
 }
 
@@ -115,7 +116,10 @@ void Rock::SwapDir::disconnect(StoreEntry &e)
 uint64_t
 Rock::SwapDir::currentSize() const
 {
-    return HeaderSize + max_objsize * currentCount();
+    const uint64_t spaceSize = !dbSlotIndex ?
+        maxSize() : (slotSize * dbSlotIndex->size());
+    // everything that is not free is in use
+    return maxSize() - spaceSize;
 }
 
 uint64_t
@@ -142,7 +146,7 @@
 int64_t
 Rock::SwapDir::entryLimitAllowed() const
 {
     const int64_t eLimitLo = map ? map->entryLimit() : 0; // dynamic shrinking unsupported
-    const int64_t eWanted = (maxSize() - HeaderSize)/maxObjectSize();
+    const int64_t eWanted = (maxSize() - HeaderSize)/slotSize;
     return min(max(eLimitLo, eWanted), entryLimitHigh());
 }
 
@@ -302,6 +306,7 @@ Rock::SwapDir::getOptionTree() const
 {
     ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(::SwapDir::getOptionTree());
     assert(vector);
+    vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
     vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
     vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
     return vector;
 }
 
@@ -310,7 +315,7 @@ Rock::SwapDir::getOptionTree() const
 bool
 Rock::SwapDir::allowOptionReconfigure(const char *const option) const
 {
-    return strcmp(option, "max-size") != 0 &&
+    return strcmp(option, "slot-size") != 0 &&
            ::SwapDir::allowOptionReconfigure(option);
 }
 
@@ -319,7 +324,7 @@ bool
 Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfiguring)
 {
     // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
-    // including time unit handling. Same for size.
+    // including time unit handling. Same for size and rate.
 
     time_msec_t *storedTime;
     if (strcmp(option, "swap-timeout") == 0)
@@ -405,17 +410,60 @@ Rock::SwapDir::dumpRateOption(StoreEntry * e) const
     storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
 }
 
+/// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
+bool
+Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfiguring)
+{
+    uint64_t *storedSize;
+    if (strcmp(option, "slot-size") == 0)
+        storedSize = &slotSize;
+    else
+        return false;
+
+    if (!value)
+        self_destruct();
+
+    // TODO: handle size units and detect parsing errors better
+    const uint64_t newSize = strtoll(value, NULL, 10);
+    if (newSize <= 0) {
+        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
+        self_destruct();
+    }
+
+    if (newSize <= sizeof(DbCellHeader)) {
+        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
+        self_destruct();
+    }
+
+    if (!reconfiguring)
+        *storedSize = newSize;
+    else if (*storedSize != newSize) {
+        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
+               << " cannot be changed dynamically, value left unchanged: " <<
+               *storedSize);
+    }
+
+    return true;
+}
+
+/// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
+void
+Rock::SwapDir::dumpSizeOption(StoreEntry * e) const
+{
+    storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
+}
+
 /// check the results of the configuration; only level-0 debugging works here
 void
 Rock::SwapDir::validateOptions()
 {
-    if (max_objsize <= 0)
-        fatal("Rock store requires a positive max-size");
+    if (slotSize <= 0)
+        fatal("Rock store requires a positive slot-size");
 
     const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
-    const int64_t maxObjectSizeRoundingWaste = maxObjectSize();
+    const int64_t slotSizeRoundingWaste = slotSize;
     const int64_t maxRoundingWaste =
-        max(maxSizeRoundingWaste, maxObjectSizeRoundingWaste);
+        max(maxSizeRoundingWaste, slotSizeRoundingWaste);
     const int64_t usableDiskSize = diskOffset(entryLimitAllowed());
     const int64_t diskWasteSize = maxSize() - usableDiskSize;
     Must(diskWasteSize >= 0);
@@ -425,7 +473,7 @@ Rock::SwapDir::validateOptions()
         diskWasteSize >= maxRoundingWaste) {
         debugs(47, DBG_CRITICAL, "Rock store cache_dir[" << index << "] '" << path << "':");
         debugs(47, DBG_CRITICAL, "\tmaximum number of entries: " << entryLimitAllowed());
-        debugs(47, DBG_CRITICAL, "\tmaximum object size: " << maxObjectSize() << " Bytes");
+        debugs(47, DBG_CRITICAL, "\tdb slot size: " << slotSize << " Bytes");
         debugs(47, DBG_CRITICAL, "\tmaximum db size: " << maxSize() << " Bytes");
         debugs(47, DBG_CRITICAL, "\tusable db size: " << usableDiskSize << " Bytes");
         debugs(47, DBG_CRITICAL, "\tdisk space waste: " << diskWasteSize << " Bytes");
@@ -529,7 +577,7 @@ int64_t
 Rock::SwapDir::diskOffset(int filen) const
 {
     assert(filen >= 0);
-    return HeaderSize + max_objsize*filen;
+    return HeaderSize + slotSize*filen;
 }
 
 int64_t
@@ -549,7 +597,7 @@ Rock::SwapDir::diskOffsetLimit() const
 int
 Rock::SwapDir::entryMaxPayloadSize() const
 {
-    return max_objsize - sizeof(DbCellHeader);
+    return slotSize - sizeof(DbCellHeader);
 }
 
 int
@@ -882,13 +930,19 @@ void Rock::SwapDirRr::create(const RunnerRegistry &)
     for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
         if (const Rock::SwapDir *const sd = dynamic_cast<const Rock::SwapDir *>(INDEXSD(i))) {
             const int64_t capacity = sd->entryLimitAllowed();
+
+            String inodesPath = sd->path;
+            inodesPath.append("_inodes");
             SwapDir::DirMap::Owner *const mapOwner =
-                SwapDir::DirMap::Init(sd->path, capacity);
+                SwapDir::DirMap::Init(inodesPath.termedBuf(), capacity);
             mapOwners.push_back(mapOwner);
 
+            String spacesPath = sd->path;
+            spacesPath.append("_spaces");
             // XXX: remove pool id and counters from PageStack
             Ipc::Mem::Owner<Ipc::Mem::PageStack> *const dbSlotsOwner =
-                shm_new(Ipc::Mem::PageStack)(sd->path, i, capacity,
+                shm_new(Ipc::Mem::PageStack)(spacesPath.termedBuf(),
+                                             i, capacity,
                                              sizeof(DbCellHeader));
             dbSlotsOwners.push_back(dbSlotsOwner);
diff --git a/src/fs/rock/RockSwapDir.h b/src/fs/rock/RockSwapDir.h
index bd2739d010..50352aa015 100644
--- a/src/fs/rock/RockSwapDir.h
+++ b/src/fs/rock/RockSwapDir.h
@@ -38,9 +38,6 @@ public:
     virtual void create();
     virtual void parse(int index, char *path);
 
-    // XXX: stop misusing max_objsize as slot size
-    virtual int64_t maxObjectSize() const { return max_objsize * entryLimitAllowed(); }
-
    int64_t entryLimitHigh() const { return SwapFilenMax; } ///< Core limit
    int64_t entryLimitAllowed() const;
 
@@ -59,6 +56,8 @@ public:
     };
     typedef Ipc::StoreMapWithExtras<DbCellHeader> DirMap;
 
+    uint64_t slotSize; ///< all db slots are of this size
+
 protected:
     /* protected ::SwapDir API */
     virtual bool needsDiskStrand() const;
@@ -88,6 +87,8 @@ protected:
     void dumpTimeOption(StoreEntry * e) const;
     bool parseRateOption(char const *option, const char *value, int reconfiguring);
     void dumpRateOption(StoreEntry * e) const;
+    bool parseSizeOption(char const *option, const char *value, int reconfiguring);
+    void dumpSizeOption(StoreEntry * e) const;
 
     void rebuild(); ///< starts loading and validating stored entry metadata
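
Editor's note: for illustration only, a hypothetical squid.conf line using the new option might look like the sketch below. The directory path and numeric values are made-up examples, not part of this commit; slot-size is given in plain bytes because the parser above does not yet handle size units, and the option cannot be changed during reconfigure.

        # Hypothetical example: a 4096 MB rock cache_dir using 32 KB slots
        # (a multiple of the typical OS I/O page size, larger than DbCellHeader),
        # combined with the swap-timeout and max-swap-rate options documented above.
        cache_dir rock /var/cache/squid/rock 4096 slot-size=32768 swap-timeout=300 max-swap-rate=200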