/* DEBUG: section 47    Store Directory Routines */
7 #include "CollapsedForwarding.h"
8 #include "ConfigOption.h"
9 #include "DiskIO/DiskIOModule.h"
10 #include "DiskIO/DiskIOStrategy.h"
11 #include "DiskIO/ReadRequest.h"
12 #include "DiskIO/WriteRequest.h"
13 #include "fs/rock/RockSwapDir.h"
14 #include "fs/rock/RockIoState.h"
15 #include "fs/rock/RockIoRequests.h"
16 #include "fs/rock/RockRebuild.h"
18 #include "ipc/mem/Pages.h"
19 #include "MemObject.h"
21 #include "SquidConfig.h"
22 #include "SquidMath.h"
32 const int64_t Rock::SwapDir::HeaderSize
= 16*1024;
34 Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
35 slotSize(HeaderSize
), filePath(NULL
), map(NULL
), io(NULL
),
40 Rock::SwapDir::~SwapDir()
48 Rock::SwapDir::search(String
const url
, HttpRequest
*)
51 return NULL
; // XXX: implement
55 Rock::SwapDir::get(String
const key
, STOREGETCLIENT cb
, void *data
)
57 ::SwapDir::get(key
, cb
, data
);
60 // called when Squid core needs a StoreEntry with a given key
62 Rock::SwapDir::get(const cache_key
*key
)
64 if (!map
|| !theFile
|| !theFile
->canRead())
68 const Ipc::StoreMapAnchor
*const slot
= map
->openForReading(key
, filen
);
72 const Ipc::StoreMapAnchor::Basics
&basics
= slot
->basics
;
74 // create a brand new store entry and initialize it with stored basics
75 StoreEntry
*e
= new StoreEntry();
78 e
->swap_filen
= filen
;
79 e
->swap_file_sz
= basics
.swap_file_sz
;
80 e
->lastref
= basics
.lastref
;
81 e
->timestamp
= basics
.timestamp
;
82 e
->expires
= basics
.expires
;
83 e
->lastmod
= basics
.lastmod
;
84 e
->refcount
= basics
.refcount
;
85 e
->flags
= basics
.flags
;
86 e
->store_status
= STORE_OK
;
87 e
->setMemStatus(NOT_IN_MEMORY
);
88 e
->swap_status
= SWAPOUT_DONE
;
89 e
->ping_status
= PING_NONE
;
90 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
91 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
92 EBIT_CLR(e
->flags
, KEY_PRIVATE
);
93 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
98 // the disk entry remains open for reading, protected from modifications
101 void Rock::SwapDir::disconnect(StoreEntry
&e
)
103 assert(e
.swap_dirn
== index
);
104 assert(e
.swap_filen
>= 0);
105 // cannot have SWAPOUT_NONE entry with swap_filen >= 0
106 assert(e
.swap_status
!= SWAPOUT_NONE
);
108 // do not rely on e.swap_status here because there is an async delay
109 // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
111 // since e has swap_filen, its slot is locked for either reading or writing
112 map
->abortIo(e
.swap_filen
);
115 e
.swap_status
= SWAPOUT_NONE
;
119 Rock::SwapDir::currentSize() const
121 const uint64_t spaceSize
= !freeSlots
?
122 maxSize() : (slotSize
* freeSlots
->size());
123 // everything that is not free is in use
124 return maxSize() - spaceSize
;
128 Rock::SwapDir::currentCount() const
130 return map
? map
->entryCount() : 0;
133 /// In SMP mode only the disker process reports stats to avoid
134 /// counting the same stats by multiple processes.
136 Rock::SwapDir::doReportStat() const
138 return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
142 Rock::SwapDir::swappedOut(const StoreEntry
&)
144 // stats are not stored but computed when needed
148 Rock::SwapDir::entryLimitAllowed() const
150 const int64_t eLimitLo
= map
? map
->entryLimit() : 0; // dynamic shrinking unsupported
151 const int64_t eWanted
= (maxSize() - HeaderSize
)/slotSize
;
152 return min(max(eLimitLo
, eWanted
), entryLimitHigh());
155 // TODO: encapsulate as a tool; identical to CossSwapDir::create()
157 Rock::SwapDir::create()
162 if (UsingSmp() && !IamDiskProcess()) {
163 debugs (47,3, HERE
<< "disker will create in " << path
);
167 debugs (47,3, HERE
<< "creating in " << path
);
170 if (::stat(path
, &dir_sb
) == 0) {
172 if (::stat(filePath
, &file_sb
) == 0) {
173 debugs (47, DBG_IMPORTANT
, "Skipping existing Rock db: " << filePath
);
176 // else the db file is not there or is not accessible, and we will try
177 // to create it later below, generating a detailed error on failures.
178 } else { // path does not exist or is inaccessible
179 // If path exists but is not accessible, mkdir() below will fail, and
180 // the admin should see the error and act accordingly, so there is
181 // no need to distinguish ENOENT from other possible stat() errors.
182 debugs (47, DBG_IMPORTANT
, "Creating Rock db directory: " << path
);
183 const int res
= mkdir(path
, 0700);
185 createError("mkdir");
188 debugs (47, DBG_IMPORTANT
, "Creating Rock db: " << filePath
);
189 const int swap
= open(filePath
, O_WRONLY
|O_CREAT
|O_TRUNC
|O_BINARY
, 0600);
191 createError("create");
193 #if SLOWLY_FILL_WITH_ZEROS
195 Must(maxSize() % sizeof(block
) == 0);
196 memset(block
, '\0', sizeof(block
));
198 for (off_t offset
= 0; offset
< maxSize(); offset
+= sizeof(block
)) {
199 if (write(swap
, block
, sizeof(block
)) != sizeof(block
))
200 createError("write");
203 if (ftruncate(swap
, maxSize()) != 0)
204 createError("truncate");
206 char header
[HeaderSize
];
207 memset(header
, '\0', sizeof(header
));
208 if (write(swap
, header
, sizeof(header
)) != sizeof(header
))
209 createError("write");
215 // report Rock DB creation error and exit
217 Rock::SwapDir::createError(const char *const msg
) {
218 debugs(47, DBG_CRITICAL
, "ERROR: Failed to initialize Rock Store db in " <<
219 filePath
<< "; " << msg
<< " error: " << xstrerror());
220 fatal("Rock Store db creation error");
224 Rock::SwapDir::init()
228 // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
229 // are refcounted. We up our count once to avoid implicit delete's.
232 freeSlots
= shm_old(Ipc::Mem::PageStack
)(freeSlotsPath());
235 map
= new DirMap(inodeMapPath());
238 const char *ioModule
= needsDiskStrand() ? "IpcIo" : "Blocking";
239 if (DiskIOModule
*m
= DiskIOModule::Find(ioModule
)) {
240 debugs(47,2, HERE
<< "Using DiskIO module: " << ioModule
);
241 io
= m
->createStrategy();
244 debugs(47, DBG_CRITICAL
, "FATAL: Rock store is missing DiskIO module: " <<
246 fatal("Rock Store missing a required DiskIO module");
249 theFile
= io
->newFile(filePath
);
250 theFile
->configure(fileConfig
);
251 theFile
->open(O_RDWR
, 0644, this);
253 // Increment early. Otherwise, if one SwapDir finishes rebuild before
254 // others start, storeRebuildComplete() will think the rebuild is over!
255 // TODO: move store_dirs_rebuilding hack to store modules that need it.
256 ++StoreController::store_dirs_rebuilding
;
260 Rock::SwapDir::needsDiskStrand() const
262 const bool wontEvenWorkWithoutDisker
= Config
.workers
> 1;
263 const bool wouldWorkBetterWithDisker
= DiskIOModule::Find("IpcIo");
264 return InDaemonMode() && (wontEvenWorkWithoutDisker
||
265 wouldWorkBetterWithDisker
);
269 Rock::SwapDir::parse(int anIndex
, char *aPath
)
273 path
= xstrdup(aPath
);
275 // cache store is located at path/db
277 fname
.append("/rock");
278 filePath
= xstrdup(fname
.termedBuf());
283 // Current openForWriting() code overwrites the old slot if needed
284 // and possible, so proactively removing old slots is probably useless.
285 assert(!repl
); // repl = createRemovalPolicy(Config.replPolicy);
291 Rock::SwapDir::reconfigure()
295 // TODO: can we reconfigure the replacement policy (repl)?
299 /// parse maximum db disk size
301 Rock::SwapDir::parseSize(const bool reconfig
)
303 const int i
= GetInteger();
305 fatal("negative Rock cache_dir size value");
306 const uint64_t new_max_size
=
307 static_cast<uint64_t>(i
) << 20; // MBytes to Bytes
309 max_size
= new_max_size
;
310 else if (new_max_size
!= max_size
) {
311 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir '" << path
<< "' size "
312 "cannot be changed dynamically, value left unchanged (" <<
313 (max_size
>> 20) << " MB)");
318 Rock::SwapDir::getOptionTree() const
320 ConfigOptionVector
*vector
= dynamic_cast<ConfigOptionVector
*>(::SwapDir::getOptionTree());
322 vector
->options
.push_back(new ConfigOptionAdapter
<SwapDir
>(*const_cast<SwapDir
*>(this), &SwapDir::parseSizeOption
, &SwapDir::dumpSizeOption
));
323 vector
->options
.push_back(new ConfigOptionAdapter
<SwapDir
>(*const_cast<SwapDir
*>(this), &SwapDir::parseTimeOption
, &SwapDir::dumpTimeOption
));
324 vector
->options
.push_back(new ConfigOptionAdapter
<SwapDir
>(*const_cast<SwapDir
*>(this), &SwapDir::parseRateOption
, &SwapDir::dumpRateOption
));
329 Rock::SwapDir::allowOptionReconfigure(const char *const option
) const
331 return strcmp(option
, "slot-size") != 0 &&
332 ::SwapDir::allowOptionReconfigure(option
);
335 /// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
337 Rock::SwapDir::parseTimeOption(char const *option
, const char *value
, int reconfig
)
339 // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
340 // including time unit handling. Same for size and rate.
342 time_msec_t
*storedTime
;
343 if (strcmp(option
, "swap-timeout") == 0)
344 storedTime
= &fileConfig
.ioTimeout
;
351 // TODO: handle time units and detect parsing errors better
352 const int64_t parsedValue
= strtoll(value
, NULL
, 10);
353 if (parsedValue
< 0) {
354 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << parsedValue
);
358 const time_msec_t newTime
= static_cast<time_msec_t
>(parsedValue
);
361 *storedTime
= newTime
;
362 else if (*storedTime
!= newTime
) {
363 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir " << path
<< ' ' << option
364 << " cannot be changed dynamically, value left unchanged: " <<
371 /// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
373 Rock::SwapDir::dumpTimeOption(StoreEntry
* e
) const
375 if (fileConfig
.ioTimeout
)
376 storeAppendPrintf(e
, " swap-timeout=%" PRId64
,
377 static_cast<int64_t>(fileConfig
.ioTimeout
));
380 /// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
382 Rock::SwapDir::parseRateOption(char const *option
, const char *value
, int isaReconfig
)
385 if (strcmp(option
, "max-swap-rate") == 0)
386 storedRate
= &fileConfig
.ioRate
;
393 // TODO: handle time units and detect parsing errors better
394 const int64_t parsedValue
= strtoll(value
, NULL
, 10);
395 if (parsedValue
< 0) {
396 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << parsedValue
);
400 const int newRate
= static_cast<int>(parsedValue
);
403 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << newRate
);
408 *storedRate
= newRate
;
409 else if (*storedRate
!= newRate
) {
410 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir " << path
<< ' ' << option
411 << " cannot be changed dynamically, value left unchanged: " <<
418 /// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
420 Rock::SwapDir::dumpRateOption(StoreEntry
* e
) const
422 if (fileConfig
.ioRate
>= 0)
423 storeAppendPrintf(e
, " max-swap-rate=%d", fileConfig
.ioRate
);
426 /// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
428 Rock::SwapDir::parseSizeOption(char const *option
, const char *value
, int reconfig
)
430 uint64_t *storedSize
;
431 if (strcmp(option
, "slot-size") == 0)
432 storedSize
= &slotSize
;
439 // TODO: handle size units and detect parsing errors better
440 const uint64_t newSize
= strtoll(value
, NULL
, 10);
442 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must be positive; got: " << newSize
);
446 if (newSize
<= sizeof(DbCellHeader
)) {
447 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must exceed " << sizeof(DbCellHeader
) << "; got: " << newSize
);
452 *storedSize
= newSize
;
453 else if (*storedSize
!= newSize
) {
454 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir " << path
<< ' ' << option
455 << " cannot be changed dynamically, value left unchanged: " <<
462 /// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
464 Rock::SwapDir::dumpSizeOption(StoreEntry
* e
) const
466 storeAppendPrintf(e
, " slot-size=%" PRId64
, slotSize
);
469 /// check the results of the configuration; only level-0 debugging works here
471 Rock::SwapDir::validateOptions()
474 fatal("Rock store requires a positive slot-size");
476 const int64_t maxSizeRoundingWaste
= 1024 * 1024; // size is configured in MB
477 const int64_t slotSizeRoundingWaste
= slotSize
;
478 const int64_t maxRoundingWaste
=
479 max(maxSizeRoundingWaste
, slotSizeRoundingWaste
);
480 const int64_t usableDiskSize
= diskOffset(entryLimitAllowed());
481 const int64_t diskWasteSize
= maxSize() - usableDiskSize
;
482 Must(diskWasteSize
>= 0);
484 // warn if maximum db size is not reachable due to sfileno limit
485 if (entryLimitAllowed() == entryLimitHigh() &&
486 diskWasteSize
>= maxRoundingWaste
) {
487 debugs(47, DBG_CRITICAL
, "Rock store cache_dir[" << index
<< "] '" << path
<< "':");
488 debugs(47, DBG_CRITICAL
, "\tmaximum number of entries: " << entryLimitAllowed());
489 debugs(47, DBG_CRITICAL
, "\tdb slot size: " << slotSize
<< " Bytes");
490 debugs(47, DBG_CRITICAL
, "\tmaximum db size: " << maxSize() << " Bytes");
491 debugs(47, DBG_CRITICAL
, "\tusable db size: " << usableDiskSize
<< " Bytes");
492 debugs(47, DBG_CRITICAL
, "\tdisk space waste: " << diskWasteSize
<< " Bytes");
493 debugs(47, DBG_CRITICAL
, "WARNING: Rock store config wastes space.");
498 Rock::SwapDir::rebuild()
500 //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init()
501 AsyncJob::Start(new Rebuild(this));
505 Rock::SwapDir::canStore(const StoreEntry
&e
, int64_t diskSpaceNeeded
, int &load
) const
507 if (!::SwapDir::canStore(e
, sizeof(DbCellHeader
)+diskSpaceNeeded
, load
))
510 if (!theFile
|| !theFile
->canWrite())
516 // Do not start I/O transaction if there are less than 10% free pages left.
517 // TODO: reserve page instead
518 if (needsDiskStrand() &&
519 Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage
) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage
)) {
520 debugs(47, 5, HERE
<< "too few shared pages for IPC I/O left");
531 StoreIOState::Pointer
532 Rock::SwapDir::createStoreIO(StoreEntry
&e
, StoreIOState::STFNCB
*cbFile
, StoreIOState::STIOCB
*cbIo
, void *data
)
534 if (!theFile
|| theFile
->error()) {
535 debugs(47,4, HERE
<< theFile
);
540 Ipc::StoreMapAnchor
*const slot
=
541 map
->openForWriting(reinterpret_cast<const cache_key
*>(e
.key
), filen
);
543 debugs(47, 5, HERE
<< "map->add failed");
550 // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
551 // If that does not happen, the entry will not decrement the read level!
553 Rock::SwapDir::Pointer
self(this);
554 IoState
*sio
= new IoState(self
, &e
, cbFile
, cbIo
, data
);
556 sio
->swap_dirn
= index
;
557 sio
->swap_filen
= filen
;
558 sio
->writeableAnchor_
= slot
;
560 debugs(47,5, HERE
<< "dir " << index
<< " created new filen " <<
561 std::setfill('0') << std::hex
<< std::uppercase
<< std::setw(8) <<
562 sio
->swap_filen
<< std::dec
<< " starting at " <<
563 diskOffset(sio
->swap_filen
));
572 Rock::SwapDir::diskOffset(int filen
) const
575 return HeaderSize
+ slotSize
*filen
;
579 Rock::SwapDir::diskOffset(Ipc::Mem::PageId
&pageId
) const
582 return diskOffset(pageId
.number
- 1);
586 Rock::SwapDir::diskOffsetLimit() const
589 return diskOffset(map
->entryLimit());
593 Rock::SwapDir::entryMaxPayloadSize() const
595 return slotSize
- sizeof(DbCellHeader
);
599 Rock::SwapDir::entriesNeeded(const int64_t objSize
) const
601 return (objSize
+ entryMaxPayloadSize() - 1) / entryMaxPayloadSize();
605 Rock::SwapDir::useFreeSlot(Ipc::Mem::PageId
&pageId
)
607 if (freeSlots
->pop(pageId
)) {
608 debugs(47, 5, "got a previously free slot: " << pageId
);
612 // catch free slots delivered to noteFreeMapSlice()
613 assert(!waitingForPage
);
614 waitingForPage
= &pageId
;
615 if (map
->purgeOne()) {
616 assert(!waitingForPage
); // noteFreeMapSlice() should have cleared it
617 assert(pageId
.set());
618 debugs(47, 5, "got a previously busy slot: " << pageId
);
621 assert(waitingForPage
== &pageId
);
622 waitingForPage
= NULL
;
624 debugs(47, 3, "cannot get a slot; entries: " << map
->entryCount());
629 Rock::SwapDir::validSlotId(const SlotId slotId
) const
631 return 0 <= slotId
&& slotId
< entryLimitAllowed();
635 Rock::SwapDir::noteFreeMapSlice(const sfileno sliceId
)
637 Ipc::Mem::PageId pageId
;
638 pageId
.pool
= index
+1;
639 pageId
.number
= sliceId
+1;
640 if (waitingForPage
) {
641 *waitingForPage
= pageId
;
642 waitingForPage
= NULL
;
644 freeSlots
->push(pageId
);
648 // tries to open an old entry with swap_filen for reading
649 StoreIOState::Pointer
650 Rock::SwapDir::openStoreIO(StoreEntry
&e
, StoreIOState::STFNCB
*cbFile
, StoreIOState::STIOCB
*cbIo
, void *data
)
652 if (!theFile
|| theFile
->error()) {
653 debugs(47,4, HERE
<< theFile
);
657 if (e
.swap_filen
< 0) {
658 debugs(47,4, HERE
<< e
);
662 // Do not start I/O transaction if there are less than 10% free pages left.
663 // TODO: reserve page instead
664 if (needsDiskStrand() &&
665 Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage
) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage
)) {
666 debugs(47, 5, HERE
<< "too few shared pages for IPC I/O left");
670 // The are two ways an entry can get swap_filen: our get() locked it for
671 // reading or our storeSwapOutStart() locked it for writing. Peeking at our
672 // locked entry is safe, but no support for reading a filling entry.
673 const Ipc::StoreMapAnchor
*slot
= map
->peekAtReader(e
.swap_filen
);
675 return NULL
; // we were writing afterall
677 Rock::SwapDir::Pointer
self(this);
678 IoState
*sio
= new IoState(self
, &e
, cbFile
, cbIo
, data
);
680 sio
->swap_dirn
= index
;
681 sio
->swap_filen
= e
.swap_filen
;
682 sio
->readableAnchor_
= slot
;
685 debugs(47,5, HERE
<< "dir " << index
<< " has old filen: " <<
686 std::setfill('0') << std::hex
<< std::uppercase
<< std::setw(8) <<
689 assert(slot
->sameKey(static_cast<const cache_key
*>(e
.key
)));
690 assert(slot
->basics
.swap_file_sz
> 0);
691 assert(slot
->basics
.swap_file_sz
== e
.swap_file_sz
);
697 Rock::SwapDir::ioCompletedNotification()
700 fatalf("Rock cache_dir failed to initialize db file: %s", filePath
);
702 if (theFile
->error())
703 fatalf("Rock cache_dir at %s failed to open db file: %s", filePath
,
706 debugs(47, 2, "Rock cache_dir[" << index
<< "] limits: " <<
707 std::setw(12) << maxSize() << " disk bytes and " <<
708 std::setw(7) << map
->entryLimit() << " entries");
714 Rock::SwapDir::closeCompleted()
720 Rock::SwapDir::readCompleted(const char *buf
, int rlen
, int errflag
, RefCount
< ::ReadRequest
> r
)
722 ReadRequest
*request
= dynamic_cast<Rock::ReadRequest
*>(r
.getRaw());
724 IoState::Pointer sio
= request
->sio
;
726 if (errflag
== DISK_OK
&& rlen
> 0)
727 sio
->offset_
+= rlen
;
729 StoreIOState::STRCB
*callb
= sio
->read
.callback
;
731 sio
->read
.callback
= NULL
;
733 if (cbdataReferenceValidDone(sio
->read
.callback_data
, &cbdata
))
734 callb(cbdata
, r
->buf
, rlen
, sio
.getRaw());
738 Rock::SwapDir::writeCompleted(int errflag
, size_t rlen
, RefCount
< ::WriteRequest
> r
)
740 Rock::WriteRequest
*request
= dynamic_cast<Rock::WriteRequest
*>(r
.getRaw());
742 assert(request
->sio
!= NULL
);
743 IoState
&sio
= *request
->sio
;
745 // quit if somebody called IoState::close() while we were waiting
746 if (!sio
.stillWaiting()) {
747 debugs(79, 3, "ignoring closed entry " << sio
.swap_filen
);
751 // XXX: can we check that this is needed w/o stalling readers
752 // that appear right after our check?
753 if (Config
.onoff
.collapsed_forwarding
)
754 CollapsedForwarding::NewData(sio
);
756 if (errflag
== DISK_OK
) {
757 // do not increment sio.offset_ because we do it in sio->write()
758 if (request
->isLast
) {
759 // close, the entry gets the read lock
760 map
->closeForWriting(sio
.swap_filen
, true);
761 sio
.finishedWriting(errflag
);
764 writeError(sio
.swap_filen
);
765 sio
.finishedWriting(errflag
);
766 // and hope that Core will call disconnect() to close the map entry
771 Rock::SwapDir::writeError(const sfileno fileno
)
773 // Do not abortWriting here. The entry should keep the write lock
774 // instead of losing association with the store and confusing core.
775 map
->freeEntry(fileno
); // will mark as unusable, just in case
776 // All callers must also call IoState callback, to propagate the error.
780 Rock::SwapDir::full() const
782 return freeSlots
!= NULL
&& !freeSlots
->size();
785 // storeSwapOutFileClosed calls this nethod on DISK_NO_SPACE_LEFT,
786 // but it should not happen for us
788 Rock::SwapDir::diskFull()
790 debugs(20, DBG_IMPORTANT
, "BUG: No space left with rock cache_dir: " <<
794 /// purge while full(); it should be sufficient to purge just one
796 Rock::SwapDir::maintain()
798 // The Store calls this to free some db space, but there is nothing wrong
799 // with a full() db, except when db has to shrink after reconfigure, and
800 // we do not support shrinking yet (it would have to purge specific slots).
801 // TODO: Disable maintain() requests when they are pointless.
805 Rock::SwapDir::reference(StoreEntry
&e
)
807 debugs(47, 5, HERE
<< &e
<< ' ' << e
.swap_dirn
<< ' ' << e
.swap_filen
);
808 if (repl
&& repl
->Referenced
)
809 repl
->Referenced(repl
, &e
, &e
.repl
);
813 Rock::SwapDir::dereference(StoreEntry
&e
, bool)
815 debugs(47, 5, HERE
<< &e
<< ' ' << e
.swap_dirn
<< ' ' << e
.swap_filen
);
816 if (repl
&& repl
->Dereferenced
)
817 repl
->Dereferenced(repl
, &e
, &e
.repl
);
819 // no need to keep e in the global store_table for us; we have our own map
824 Rock::SwapDir::unlinkdUseful() const
826 // no entry-specific files to unlink
831 Rock::SwapDir::unlink(StoreEntry
&e
)
833 debugs(47, 5, HERE
<< e
);
835 map
->freeEntry(e
.swap_filen
);
840 Rock::SwapDir::trackReferences(StoreEntry
&e
)
842 debugs(47, 5, HERE
<< e
);
844 repl
->Add(repl
, &e
, &e
.repl
);
848 Rock::SwapDir::ignoreReferences(StoreEntry
&e
)
850 debugs(47, 5, HERE
<< e
);
852 repl
->Remove(repl
, &e
, &e
.repl
);
856 Rock::SwapDir::statfs(StoreEntry
&e
) const
858 storeAppendPrintf(&e
, "\n");
859 storeAppendPrintf(&e
, "Maximum Size: %" PRIu64
" KB\n", maxSize() >> 10);
860 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
861 currentSize() / 1024.0,
862 Math::doublePercent(currentSize(), maxSize()));
865 const int limit
= map
->entryLimit();
866 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
868 const int entryCount
= map
->entryCount();
869 storeAppendPrintf(&e
, "Current entries: %9d %.2f%%\n",
870 entryCount
, (100.0 * entryCount
/ limit
));
872 const unsigned int slotsFree
= !freeSlots
? 0 : freeSlots
->size();
873 if (slotsFree
<= static_cast<const unsigned int>(limit
)) {
874 const int usedSlots
= limit
- static_cast<const int>(slotsFree
);
875 storeAppendPrintf(&e
, "Used slots: %9d %.2f%%\n",
876 usedSlots
, (100.0 * usedSlots
/ limit
));
878 if (limit
< 100) { // XXX: otherwise too expensive to count
879 Ipc::ReadWriteLockStats stats
;
880 map
->updateStats(stats
);
886 storeAppendPrintf(&e
, "Pending operations: %d out of %d\n",
887 store_open_disk_fd
, Config
.max_open_disk_fds
);
889 storeAppendPrintf(&e
, "Flags:");
892 storeAppendPrintf(&e
, " SELECTED");
895 storeAppendPrintf(&e
, " READ-ONLY");
897 storeAppendPrintf(&e
, "\n");
902 Rock::SwapDir::inodeMapPath() const {
903 static String inodesPath
;
905 inodesPath
.append("_inodes");
906 return inodesPath
.termedBuf();
910 Rock::SwapDir::freeSlotsPath() const {
911 static String spacesPath
;
913 spacesPath
.append("_spaces");
914 return spacesPath
.termedBuf();
919 RunnerRegistrationEntry(rrAfterConfig
, SwapDirRr
);
922 void Rock::SwapDirRr::create(const RunnerRegistry
&)
924 Must(mapOwners
.empty() && freeSlotsOwners
.empty());
925 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; ++i
) {
926 if (const Rock::SwapDir
*const sd
= dynamic_cast<Rock::SwapDir
*>(INDEXSD(i
))) {
927 const int64_t capacity
= sd
->entryLimitAllowed();
929 SwapDir::DirMap::Owner
*const mapOwner
=
930 SwapDir::DirMap::Init(sd
->inodeMapPath(), capacity
);
931 mapOwners
.push_back(mapOwner
);
933 // XXX: remove pool id and counters from PageStack
934 Ipc::Mem::Owner
<Ipc::Mem::PageStack
> *const freeSlotsOwner
=
935 shm_new(Ipc::Mem::PageStack
)(sd
->freeSlotsPath(),
937 sizeof(DbCellHeader
));
938 freeSlotsOwners
.push_back(freeSlotsOwner
);
940 // XXX: add method to initialize PageStack with no free pages
942 Ipc::Mem::PageId pageId
;
943 if (!freeSlotsOwner
->object()->pop(pageId
))
950 Rock::SwapDirRr::~SwapDirRr()
952 for (size_t i
= 0; i
< mapOwners
.size(); ++i
) {
954 delete freeSlotsOwners
[i
];