2 * DEBUG: section 47 Store Directory Routines
7 #include "ConfigOption.h"
8 #include "DiskIO/DiskIOModule.h"
9 #include "DiskIO/DiskIOStrategy.h"
10 #include "DiskIO/ReadRequest.h"
11 #include "DiskIO/WriteRequest.h"
12 #include "fs/rock/RockIoRequests.h"
13 #include "fs/rock/RockIoState.h"
14 #include "fs/rock/RockRebuild.h"
15 #include "fs/rock/RockSwapDir.h"
17 #include "ipc/mem/Pages.h"
18 #include "MemObject.h"
20 #include "SquidConfig.h"
21 #include "SquidMath.h"
31 const int64_t Rock::SwapDir::HeaderSize
= 16*1024;
33 Rock::SwapDir::SwapDir(): ::SwapDir("rock"), filePath(NULL
), io(NULL
), map(NULL
)
37 Rock::SwapDir::~SwapDir()
45 Rock::SwapDir::search(String
const url
, HttpRequest
*)
48 return NULL
; // XXX: implement
52 Rock::SwapDir::get(String
const key
, STOREGETCLIENT cb
, void *data
)
54 ::SwapDir::get(key
, cb
, data
);
57 // called when Squid core needs a StoreEntry with a given key
59 Rock::SwapDir::get(const cache_key
*key
)
61 if (!map
|| !theFile
|| !theFile
->canRead())
65 const Ipc::StoreMapSlot
*const slot
= map
->openForReading(key
, filen
);
69 const Ipc::StoreMapSlot::Basics
&basics
= slot
->basics
;
71 // create a brand new store entry and initialize it with stored basics
72 StoreEntry
*e
= new StoreEntry();
75 e
->swap_filen
= filen
;
76 e
->swap_file_sz
= basics
.swap_file_sz
;
77 e
->lastref
= basics
.lastref
;
78 e
->timestamp
= basics
.timestamp
;
79 e
->expires
= basics
.expires
;
80 e
->lastmod
= basics
.lastmod
;
81 e
->refcount
= basics
.refcount
;
82 e
->flags
= basics
.flags
;
83 e
->store_status
= STORE_OK
;
84 e
->setMemStatus(NOT_IN_MEMORY
);
85 e
->swap_status
= SWAPOUT_DONE
;
86 e
->ping_status
= PING_NONE
;
87 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
88 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
89 EBIT_CLR(e
->flags
, KEY_PRIVATE
);
90 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
95 // the disk entry remains open for reading, protected from modifications
98 void Rock::SwapDir::disconnect(StoreEntry
&e
)
100 assert(e
.swap_dirn
== index
);
101 assert(e
.swap_filen
>= 0);
102 // cannot have SWAPOUT_NONE entry with swap_filen >= 0
103 assert(e
.swap_status
!= SWAPOUT_NONE
);
105 // do not rely on e.swap_status here because there is an async delay
106 // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
108 // since e has swap_filen, its slot is locked for either reading or writing
109 map
->abortIo(e
.swap_filen
);
112 e
.swap_status
= SWAPOUT_NONE
;
116 Rock::SwapDir::currentSize() const
118 return HeaderSize
+ max_objsize
* currentCount();
122 Rock::SwapDir::currentCount() const
124 return map
? map
->entryCount() : 0;
127 /// In SMP mode only the disker process reports stats to avoid
128 /// counting the same stats by multiple processes.
130 Rock::SwapDir::doReportStat() const
132 return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
136 Rock::SwapDir::swappedOut(const StoreEntry
&)
138 // stats are not stored but computed when needed
142 Rock::SwapDir::entryLimitAllowed() const
144 const int64_t eLimitLo
= map
? map
->entryLimit() : 0; // dynamic shrinking unsupported
145 const int64_t eWanted
= (maxSize() - HeaderSize
)/maxObjectSize();
146 return min(max(eLimitLo
, eWanted
), entryLimitHigh());
149 // TODO: encapsulate as a tool
151 Rock::SwapDir::create()
156 if (UsingSmp() && !IamDiskProcess()) {
157 debugs (47,3, HERE
<< "disker will create in " << path
);
161 debugs (47,3, HERE
<< "creating in " << path
);
164 if (::stat(path
, &dir_sb
) == 0) {
166 if (::stat(filePath
, &file_sb
) == 0) {
167 debugs (47, DBG_IMPORTANT
, "Skipping existing Rock db: " << filePath
);
170 // else the db file is not there or is not accessible, and we will try
171 // to create it later below, generating a detailed error on failures.
172 } else { // path does not exist or is inaccessible
173 // If path exists but is not accessible, mkdir() below will fail, and
174 // the admin should see the error and act accordingly, so there is
175 // no need to distinguish ENOENT from other possible stat() errors.
176 debugs (47, DBG_IMPORTANT
, "Creating Rock db directory: " << path
);
177 const int res
= mkdir(path
, 0700);
179 debugs(47, DBG_CRITICAL
, "Failed to create Rock db dir " << path
<<
180 ": " << xstrerror());
181 fatal("Rock Store db creation error");
185 debugs (47, DBG_IMPORTANT
, "Creating Rock db: " << filePath
);
186 #if SLOWLY_FILL_WITH_ZEROS
188 Must(maxSize() % sizeof(block
) == 0);
189 memset(block
, '\0', sizeof(block
));
191 const int swap
= open(filePath
, O_WRONLY
|O_CREAT
|O_TRUNC
|O_BINARY
, 0600);
192 for (off_t offset
= 0; offset
< maxSize(); offset
+= sizeof(block
)) {
193 if (write(swap
, block
, sizeof(block
)) != sizeof(block
)) {
194 debugs(47, DBG_CRITICAL
, "ERROR: Failed to create Rock Store db in " << filePath
<<
195 ": " << xstrerror());
196 fatal("Rock Store db creation error");
201 const int swap
= open(filePath
, O_WRONLY
|O_CREAT
|O_TRUNC
|O_BINARY
, 0600);
203 debugs(47, DBG_CRITICAL
, "ERROR: Failed to initialize Rock Store db in " << filePath
<<
204 "; create error: " << xstrerror());
205 fatal("Rock Store db creation error");
208 if (ftruncate(swap
, maxSize()) != 0) {
209 debugs(47, DBG_CRITICAL
, "ERROR: Failed to initialize Rock Store db in " << filePath
<<
210 "; truncate error: " << xstrerror());
211 fatal("Rock Store db creation error");
214 char header
[HeaderSize
];
215 memset(header
, '\0', sizeof(header
));
216 if (write(swap
, header
, sizeof(header
)) != sizeof(header
)) {
217 debugs(47, DBG_CRITICAL
, "ERROR: Failed to initialize Rock Store db in " << filePath
<<
218 "; write error: " << xstrerror());
219 fatal("Rock Store db initialization error");
226 Rock::SwapDir::init()
230 // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
231 // are refcounted. We up our count once to avoid implicit delete's.
235 map
= new DirMap(path
);
237 const char *ioModule
= needsDiskStrand() ? "IpcIo" : "Blocking";
238 if (DiskIOModule
*m
= DiskIOModule::Find(ioModule
)) {
239 debugs(47,2, HERE
<< "Using DiskIO module: " << ioModule
);
240 io
= m
->createStrategy();
243 debugs(47, DBG_CRITICAL
, "FATAL: Rock store is missing DiskIO module: " <<
245 fatal("Rock Store missing a required DiskIO module");
248 theFile
= io
->newFile(filePath
);
249 theFile
->configure(fileConfig
);
250 theFile
->open(O_RDWR
, 0644, this);
252 // Increment early. Otherwise, if one SwapDir finishes rebuild before
253 // others start, storeRebuildComplete() will think the rebuild is over!
254 // TODO: move store_dirs_rebuilding hack to store modules that need it.
255 ++StoreController::store_dirs_rebuilding
;
259 Rock::SwapDir::needsDiskStrand() const
261 const bool wontEvenWorkWithoutDisker
= Config
.workers
> 1;
262 const bool wouldWorkBetterWithDisker
= DiskIOModule::Find("IpcIo");
263 return InDaemonMode() && (wontEvenWorkWithoutDisker
||
264 wouldWorkBetterWithDisker
);
268 Rock::SwapDir::parse(int anIndex
, char *aPath
)
272 path
= xstrdup(aPath
);
274 // cache store is located at path/db
276 fname
.append("/rock");
277 filePath
= xstrdup(fname
.termedBuf());
282 // Current openForWriting() code overwrites the old slot if needed
283 // and possible, so proactively removing old slots is probably useless.
284 assert(!repl
); // repl = createRemovalPolicy(Config.replPolicy);
// NOTE(review): the body of reconfigure() (original lines 291-293) is missing
// from this mangled extract; only the signature and a TODO remain visible.
290 Rock::SwapDir::reconfigure()
294 // TODO: can we reconfigure the replacement policy (repl)?
298 /// parse maximum db disk size
300 Rock::SwapDir::parseSize(const bool reconfig
)
302 const int i
= GetInteger();
304 fatal("negative Rock cache_dir size value");
305 const uint64_t new_max_size
=
306 static_cast<uint64_t>(i
) << 20; // MBytes to Bytes
308 max_size
= new_max_size
;
309 else if (new_max_size
!= max_size
) {
310 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir '" << path
<< "' size "
311 "cannot be changed dynamically, value left unchanged (" <<
312 (max_size
>> 20) << " MB)");
317 Rock::SwapDir::getOptionTree() const
319 ConfigOptionVector
*vector
= dynamic_cast<ConfigOptionVector
*>(::SwapDir::getOptionTree());
321 vector
->options
.push_back(new ConfigOptionAdapter
<SwapDir
>(*const_cast<SwapDir
*>(this), &SwapDir::parseTimeOption
, &SwapDir::dumpTimeOption
));
322 vector
->options
.push_back(new ConfigOptionAdapter
<SwapDir
>(*const_cast<SwapDir
*>(this), &SwapDir::parseRateOption
, &SwapDir::dumpRateOption
));
327 Rock::SwapDir::allowOptionReconfigure(const char *const option
) const
329 return strcmp(option
, "max-size") != 0 &&
330 ::SwapDir::allowOptionReconfigure(option
);
333 /// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
335 Rock::SwapDir::parseTimeOption(char const *option
, const char *value
, int reconfig
)
337 // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
338 // including time unit handling. Same for size.
340 time_msec_t
*storedTime
;
341 if (strcmp(option
, "swap-timeout") == 0)
342 storedTime
= &fileConfig
.ioTimeout
;
349 // TODO: handle time units and detect parsing errors better
350 const int64_t parsedValue
= strtoll(value
, NULL
, 10);
351 if (parsedValue
< 0) {
352 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << parsedValue
);
356 const time_msec_t newTime
= static_cast<time_msec_t
>(parsedValue
);
359 *storedTime
= newTime
;
360 else if (*storedTime
!= newTime
) {
361 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir " << path
<< ' ' << option
362 << " cannot be changed dynamically, value left unchanged: " <<
369 /// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
371 Rock::SwapDir::dumpTimeOption(StoreEntry
* e
) const
373 if (fileConfig
.ioTimeout
)
374 storeAppendPrintf(e
, " swap-timeout=%" PRId64
,
375 static_cast<int64_t>(fileConfig
.ioTimeout
));
378 /// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
380 Rock::SwapDir::parseRateOption(char const *option
, const char *value
, int isaReconfig
)
383 if (strcmp(option
, "max-swap-rate") == 0)
384 storedRate
= &fileConfig
.ioRate
;
391 // TODO: handle time units and detect parsing errors better
392 const int64_t parsedValue
= strtoll(value
, NULL
, 10);
393 if (parsedValue
< 0) {
394 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << parsedValue
);
398 const int newRate
= static_cast<int>(parsedValue
);
401 debugs(3, DBG_CRITICAL
, "FATAL: cache_dir " << path
<< ' ' << option
<< " must not be negative but is: " << newRate
);
406 *storedRate
= newRate
;
407 else if (*storedRate
!= newRate
) {
408 debugs(3, DBG_IMPORTANT
, "WARNING: cache_dir " << path
<< ' ' << option
409 << " cannot be changed dynamically, value left unchanged: " <<
416 /// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
418 Rock::SwapDir::dumpRateOption(StoreEntry
* e
) const
420 if (fileConfig
.ioRate
>= 0)
421 storeAppendPrintf(e
, " max-swap-rate=%d", fileConfig
.ioRate
);
424 /// check the results of the configuration; only level-0 debugging works here
426 Rock::SwapDir::validateOptions()
428 if (max_objsize
<= 0)
429 fatal("Rock store requires a positive max-size");
431 const int64_t maxSizeRoundingWaste
= 1024 * 1024; // size is configured in MB
432 const int64_t maxObjectSizeRoundingWaste
= maxObjectSize();
433 const int64_t maxRoundingWaste
=
434 max(maxSizeRoundingWaste
, maxObjectSizeRoundingWaste
);
435 const int64_t usableDiskSize
= diskOffset(entryLimitAllowed());
436 const int64_t diskWasteSize
= maxSize() - usableDiskSize
;
437 Must(diskWasteSize
>= 0);
439 // warn if maximum db size is not reachable due to sfileno limit
440 if (entryLimitAllowed() == entryLimitHigh() &&
441 diskWasteSize
>= maxRoundingWaste
) {
442 debugs(47, DBG_CRITICAL
, "Rock store cache_dir[" << index
<< "] '" << path
<< "':");
443 debugs(47, DBG_CRITICAL
, "\tmaximum number of entries: " << entryLimitAllowed());
444 debugs(47, DBG_CRITICAL
, "\tmaximum object size: " << maxObjectSize() << " Bytes");
445 debugs(47, DBG_CRITICAL
, "\tmaximum db size: " << maxSize() << " Bytes");
446 debugs(47, DBG_CRITICAL
, "\tusable db size: " << usableDiskSize
<< " Bytes");
447 debugs(47, DBG_CRITICAL
, "\tdisk space waste: " << diskWasteSize
<< " Bytes");
448 debugs(47, DBG_CRITICAL
, "WARNING: Rock store config wastes space.");
453 Rock::SwapDir::rebuild()
455 //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init()
456 AsyncJob::Start(new Rebuild(this));
459 /* Add a new object to the cache with empty memory copy and pointer to disk
460 * use to rebuild store from disk. Based on UFSSwapDir::addDiskRestore */
462 Rock::SwapDir::addEntry(const int filen
, const DbCellHeader
&header
, const StoreEntry
&from
)
464 debugs(47, 8, HERE
<< &from
<< ' ' << from
.getMD5Text() <<
465 ", filen="<< std::setfill('0') << std::hex
<< std::uppercase
<<
466 std::setw(8) << filen
);
468 sfileno newLocation
= 0;
469 if (Ipc::StoreMapSlot
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(from
.key
), newLocation
)) {
470 if (filen
== newLocation
) {
472 map
->extras(filen
) = header
;
473 } // else some other, newer entry got into our cell
474 map
->closeForWriting(newLocation
, false);
475 return filen
== newLocation
;
482 Rock::SwapDir::canStore(const StoreEntry
&e
, int64_t diskSpaceNeeded
, int &load
) const
484 if (!::SwapDir::canStore(e
, sizeof(DbCellHeader
)+diskSpaceNeeded
, load
))
487 if (!theFile
|| !theFile
->canWrite())
493 // Do not start I/O transaction if there are less than 10% free pages left.
494 // TODO: reserve page instead
495 if (needsDiskStrand() &&
496 Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage
) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage
)) {
497 debugs(47, 5, HERE
<< "too few shared pages for IPC I/O left");
508 StoreIOState::Pointer
509 Rock::SwapDir::createStoreIO(StoreEntry
&e
, StoreIOState::STFNCB
*cbFile
, StoreIOState::STIOCB
*cbIo
, void *data
)
511 if (!theFile
|| theFile
->error()) {
512 debugs(47,4, HERE
<< theFile
);
516 // compute payload size for our cell header, using StoreEntry info
517 // careful: e.objectLen() may still be negative here
518 const int64_t expectedReplySize
= e
.mem_obj
->expectedReplySize();
519 assert(expectedReplySize
>= 0); // must know to prevent cell overflows
520 assert(e
.mem_obj
->swap_hdr_sz
> 0);
522 header
.payloadSize
= e
.mem_obj
->swap_hdr_sz
+ expectedReplySize
;
523 const int64_t payloadEnd
= sizeof(DbCellHeader
) + header
.payloadSize
;
524 assert(payloadEnd
<= max_objsize
);
527 Ipc::StoreMapSlot
*const slot
=
528 map
->openForWriting(reinterpret_cast<const cache_key
*>(e
.key
), filen
);
530 debugs(47, 5, HERE
<< "map->add failed");
533 e
.swap_file_sz
= header
.payloadSize
; // and will be copied to the map
535 map
->extras(filen
) = header
;
537 // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
538 // If that does not happen, the entry will not decrement the read level!
540 IoState
*sio
= new IoState(this, &e
, cbFile
, cbIo
, data
);
542 sio
->swap_dirn
= index
;
543 sio
->swap_filen
= filen
;
544 sio
->payloadEnd
= payloadEnd
;
545 sio
->diskOffset
= diskOffset(sio
->swap_filen
);
547 debugs(47,5, HERE
<< "dir " << index
<< " created new filen " <<
548 std::setfill('0') << std::hex
<< std::uppercase
<< std::setw(8) <<
549 sio
->swap_filen
<< std::dec
<< " at " << sio
->diskOffset
);
551 assert(sio
->diskOffset
+ payloadEnd
<= diskOffsetLimit());
560 Rock::SwapDir::diskOffset(int filen
) const
563 return HeaderSize
+ max_objsize
*filen
;
567 Rock::SwapDir::diskOffsetLimit() const
570 return diskOffset(map
->entryLimit());
573 // tries to open an old or being-written-to entry with swap_filen for reading
574 StoreIOState::Pointer
575 Rock::SwapDir::openStoreIO(StoreEntry
&e
, StoreIOState::STFNCB
*cbFile
, StoreIOState::STIOCB
*cbIo
, void *data
)
577 if (!theFile
|| theFile
->error()) {
578 debugs(47,4, HERE
<< theFile
);
582 if (e
.swap_filen
< 0) {
583 debugs(47,4, HERE
<< e
);
587 // Do not start I/O transaction if there are less than 10% free pages left.
588 // TODO: reserve page instead
589 if (needsDiskStrand() &&
590 Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage
) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage
)) {
591 debugs(47, 5, HERE
<< "too few shared pages for IPC I/O left");
595 // The are two ways an entry can get swap_filen: our get() locked it for
596 // reading or our storeSwapOutStart() locked it for writing. Peeking at our
597 // locked entry is safe, but no support for reading a filling entry.
598 const Ipc::StoreMapSlot
*slot
= map
->peekAtReader(e
.swap_filen
);
600 return NULL
; // we were writing afterall
602 IoState
*sio
= new IoState(this, &e
, cbFile
, cbIo
, data
);
604 sio
->swap_dirn
= index
;
605 sio
->swap_filen
= e
.swap_filen
;
606 sio
->payloadEnd
= sizeof(DbCellHeader
) + map
->extras(e
.swap_filen
).payloadSize
;
607 assert(sio
->payloadEnd
<= max_objsize
); // the payload fits the slot
609 debugs(47,5, HERE
<< "dir " << index
<< " has old filen: " <<
610 std::setfill('0') << std::hex
<< std::uppercase
<< std::setw(8) <<
613 assert(slot
->basics
.swap_file_sz
> 0);
614 assert(slot
->basics
.swap_file_sz
== e
.swap_file_sz
);
616 sio
->diskOffset
= diskOffset(sio
->swap_filen
);
617 assert(sio
->diskOffset
+ sio
->payloadEnd
<= diskOffsetLimit());
624 Rock::SwapDir::ioCompletedNotification()
627 fatalf("Rock cache_dir failed to initialize db file: %s", filePath
);
629 if (theFile
->error())
630 fatalf("Rock cache_dir at %s failed to open db file: %s", filePath
,
633 debugs(47, 2, "Rock cache_dir[" << index
<< "] limits: " <<
634 std::setw(12) << maxSize() << " disk bytes and " <<
635 std::setw(7) << map
->entryLimit() << " entries");
641 Rock::SwapDir::closeCompleted()
647 Rock::SwapDir::readCompleted(const char *buf
, int rlen
, int errflag
, RefCount
< ::ReadRequest
> r
)
649 ReadRequest
*request
= dynamic_cast<Rock::ReadRequest
*>(r
.getRaw());
651 IoState::Pointer sio
= request
->sio
;
653 if (errflag
== DISK_OK
&& rlen
> 0)
654 sio
->offset_
+= rlen
;
655 assert(sio
->diskOffset
+ sio
->offset_
<= diskOffsetLimit()); // post-factum
657 StoreIOState::STRCB
*callb
= sio
->read
.callback
;
659 sio
->read
.callback
= NULL
;
661 if (cbdataReferenceValidDone(sio
->read
.callback_data
, &cbdata
))
662 callb(cbdata
, r
->buf
, rlen
, sio
.getRaw());
666 Rock::SwapDir::writeCompleted(int errflag
, size_t rlen
, RefCount
< ::WriteRequest
> r
)
668 Rock::WriteRequest
*request
= dynamic_cast<Rock::WriteRequest
*>(r
.getRaw());
670 assert(request
->sio
!= NULL
);
671 IoState
&sio
= *request
->sio
;
673 if (errflag
== DISK_OK
) {
674 // close, assuming we only write once; the entry gets the read lock
675 map
->closeForWriting(sio
.swap_filen
, true);
676 // do not increment sio.offset_ because we do it in sio->write()
678 // Do not abortWriting here. The entry should keep the write lock
679 // instead of losing association with the store and confusing core.
680 map
->free(sio
.swap_filen
); // will mark as unusable, just in case
683 assert(sio
.diskOffset
+ sio
.offset_
<= diskOffsetLimit()); // post-factum
685 sio
.finishedWriting(errflag
);
689 Rock::SwapDir::full() const
691 return map
&& map
->full();
694 // storeSwapOutFileClosed calls this nethod on DISK_NO_SPACE_LEFT,
695 // but it should not happen for us
697 Rock::SwapDir::diskFull()
699 debugs(20, DBG_IMPORTANT
, "BUG: No space left with rock cache_dir: " <<
703 /// purge while full(); it should be sufficient to purge just one
705 Rock::SwapDir::maintain()
707 debugs(47,3, HERE
<< "cache_dir[" << index
<< "] guards: " <<
708 !repl
<< !map
<< !full() << StoreController::store_dirs_rebuilding
);
711 return; // no means (cannot find a victim)
714 return; // no victims (yet)
717 return; // no need (to find a victim)
719 // XXX: UFSSwapDir::maintain says we must quit during rebuild
720 if (StoreController::store_dirs_rebuilding
)
723 debugs(47,3, HERE
<< "cache_dir[" << index
<< "] state: " << map
->full() <<
724 ' ' << currentSize() << " < " << diskOffsetLimit());
726 // Hopefully, we find a removable entry much sooner (TODO: use time?)
727 const int maxProbed
= 10000;
728 RemovalPurgeWalker
*walker
= repl
->PurgeInit(repl
, maxProbed
);
730 // It really should not take that long, but this will stop "infinite" loops
731 const int maxFreed
= 1000;
733 // TODO: should we purge more than needed to minimize overheads?
734 for (; freed
< maxFreed
&& full(); ++freed
) {
735 if (StoreEntry
*e
= walker
->Next(walker
))
736 e
->release(); // will call our unlink() method
738 break; // no more objects
741 debugs(47,2, HERE
<< "Rock cache_dir[" << index
<< "] freed " << freed
<<
742 " scanned " << walker
->scanned
<< '/' << walker
->locked
);
744 walker
->Done(walker
);
747 debugs(47, DBG_CRITICAL
, "ERROR: Rock cache_dir[" << index
<< "] " <<
748 "is still full after freeing " << freed
<< " entries. A bug?");
753 Rock::SwapDir::reference(StoreEntry
&e
)
755 debugs(47, 5, HERE
<< &e
<< ' ' << e
.swap_dirn
<< ' ' << e
.swap_filen
);
756 if (repl
&& repl
->Referenced
)
757 repl
->Referenced(repl
, &e
, &e
.repl
);
761 Rock::SwapDir::dereference(StoreEntry
&e
, bool)
763 debugs(47, 5, HERE
<< &e
<< ' ' << e
.swap_dirn
<< ' ' << e
.swap_filen
);
764 if (repl
&& repl
->Dereferenced
)
765 repl
->Dereferenced(repl
, &e
, &e
.repl
);
767 // no need to keep e in the global store_table for us; we have our own map
772 Rock::SwapDir::unlinkdUseful() const
774 // no entry-specific files to unlink
779 Rock::SwapDir::unlink(StoreEntry
&e
)
781 debugs(47, 5, HERE
<< e
);
783 map
->free(e
.swap_filen
);
788 Rock::SwapDir::trackReferences(StoreEntry
&e
)
790 debugs(47, 5, HERE
<< e
);
792 repl
->Add(repl
, &e
, &e
.repl
);
796 Rock::SwapDir::ignoreReferences(StoreEntry
&e
)
798 debugs(47, 5, HERE
<< e
);
800 repl
->Remove(repl
, &e
, &e
.repl
);
804 Rock::SwapDir::statfs(StoreEntry
&e
) const
806 storeAppendPrintf(&e
, "\n");
807 storeAppendPrintf(&e
, "Maximum Size: %" PRIu64
" KB\n", maxSize() >> 10);
808 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
809 currentSize() / 1024.0,
810 Math::doublePercent(currentSize(), maxSize()));
813 const int limit
= map
->entryLimit();
814 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
816 const int entryCount
= map
->entryCount();
817 storeAppendPrintf(&e
, "Current entries: %9d %.2f%%\n",
818 entryCount
, (100.0 * entryCount
/ limit
));
820 if (limit
< 100) { // XXX: otherwise too expensive to count
821 Ipc::ReadWriteLockStats stats
;
822 map
->updateStats(stats
);
828 storeAppendPrintf(&e
, "Pending operations: %d out of %d\n",
829 store_open_disk_fd
, Config
.max_open_disk_fds
);
831 storeAppendPrintf(&e
, "Flags:");
834 storeAppendPrintf(&e
, " SELECTED");
837 storeAppendPrintf(&e
, " READ-ONLY");
839 storeAppendPrintf(&e
, "\n");
// NOTE(review): registers the Rock SwapDirRr runner for the rrAfterConfig
// stage; the surrounding namespace braces are missing from this mangled extract.
845 RunnerRegistrationEntry(rrAfterConfig
, SwapDirRr
);
848 void Rock::SwapDirRr::create(const RunnerRegistry
&)
850 Must(owners
.empty());
851 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; ++i
) {
852 if (const Rock::SwapDir
*const sd
= dynamic_cast<Rock::SwapDir
*>(INDEXSD(i
))) {
853 Rock::SwapDir::DirMap::Owner
*const owner
=
854 Rock::SwapDir::DirMap::Init(sd
->path
, sd
->entryLimitAllowed());
855 owners
.push_back(owner
);
860 Rock::SwapDirRr::~SwapDirRr()
862 for (size_t i
= 0; i
< owners
.size(); ++i
)