]> git.ipfire.org Git - thirdparty/squid.git/blame - src/fs/rock/RockSwapDir.cc
Docs: Copyright updates for 2018 (#114)
[thirdparty/squid.git] / src / fs / rock / RockSwapDir.cc
CommitLineData
e2851fe7 1/*
5b74111a 2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
bbc27441
AJ
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
e2851fe7
AR
7 */
8
bbc27441
AJ
9/* DEBUG: section 47 Store Directory Routines */
10
f7f3304a 11#include "squid.h"
8a01b99e 12#include "cache_cf.h"
807feb1d 13#include "CollapsedForwarding.h"
43ebbac3 14#include "ConfigOption.h"
e2851fe7
AR
15#include "DiskIO/DiskIOModule.h"
16#include "DiskIO/DiskIOStrategy.h"
17#include "DiskIO/ReadRequest.h"
18#include "DiskIO/WriteRequest.h"
abf396ec 19#include "fs/rock/RockHeaderUpdater.h"
e2851fe7 20#include "fs/rock/RockIoRequests.h"
602d9612 21#include "fs/rock/RockIoState.h"
e2851fe7 22#include "fs/rock/RockRebuild.h"
602d9612 23#include "fs/rock/RockSwapDir.h"
67679543 24#include "globals.h"
e0bdae60 25#include "ipc/mem/Pages.h"
f5adb654
AR
26#include "MemObject.h"
27#include "Parsing.h"
4d5904f7 28#include "SquidConfig.h"
f5adb654 29#include "SquidMath.h"
5bed43d6
FC
30#include "tools.h"
31
58373ff8 32#include <cstdlib>
f5adb654 33#include <iomanip>
36c84e19 34#include <limits>
e2851fe7 35
582c2af2
FC
36#if HAVE_SYS_STAT_H
37#include <sys/stat.h>
38#endif
39
e2851fe7
AR
40const int64_t Rock::SwapDir::HeaderSize = 16*1024;
41
9d4e9cfb 42Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
f53969cc
SM
43 slotSize(HeaderSize), filePath(NULL), map(NULL), io(NULL),
44 waitingForPage(NULL)
e2851fe7
AR
45{
46}
47
Rock::SwapDir::~SwapDir()
{
    delete io;   // DiskIOStrategy created in init()
    delete map;  // DirMap created in init()
    safe_free(filePath); // xstrdup()ed in parse()
}
54
f1debb5e
DK
// called when Squid core needs a StoreEntry with a given key
StoreEntry *
Rock::SwapDir::get(const cache_key *key)
{
    // cannot serve hits until init() has finished and the db file is readable
    if (!map || !theFile || !theFile->canRead())
        return NULL;

    sfileno filen;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, filen);
    if (!slot)
        return NULL; // miss: no readable entry with this key

    // create a brand new store entry and initialize it with stored basics
    StoreEntry *e = new StoreEntry();
    anchorEntry(*e, filen, *slot);

    e->hashInsert(key);
    trackReferences(*e);

    return e;
    // the disk entry remains open for reading, protected from modifications
}
77
/// Binds the given collapsed entry to its on-disk anchor, if any.
/// Returns false when this cache_dir has no readable entry with that key.
/// On success, inSync reports whether the local entry matches shared state.
bool
Rock::SwapDir::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map || !theFile || !theFile->canRead())
        return false;

    sfileno filen;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), filen);
    if (!slot)
        return false;

    anchorEntry(collapsed, filen, *slot);
    inSync = updateCollapsedWith(collapsed, *slot);
    return true; // even if inSync is false
}
94
/// Refreshes a collapsed entry with the latest shared-map information.
/// Returns false only when the shared state cannot be examined at all.
bool
Rock::SwapDir::updateCollapsed(StoreEntry &collapsed)
{
    if (!map || !theFile || !theFile->canRead())
        return false;

    if (collapsed.swap_filen < 0) // no longer using a disk cache
        return true;
    assert(collapsed.swap_dirn == index);

    const Ipc::StoreMapAnchor &s = map->readableEntry(collapsed.swap_filen);
    return updateCollapsedWith(collapsed, s);
}
108
109bool
110Rock::SwapDir::updateCollapsedWith(StoreEntry &collapsed, const Ipc::StoreMapAnchor &anchor)
111{
e6d2c263 112 collapsed.swap_file_sz = anchor.basics.swap_file_sz;
ce49546e
AR
113 return true;
114}
115
/// Initializes StoreEntry e from the shared map anchor at filen so that the
/// in-memory entry reflects what is stored (or is being stored) on disk.
void
Rock::SwapDir::anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    // copy the shared, fixed-size metadata into the local entry
    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastModified(basics.lastmod);
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.swap_status = SWAPOUT_DONE;
    } else {
        // an incomplete entry is still being written (possibly elsewhere)
        e.store_status = STORE_PENDING;
        e.swap_status = SWAPOUT_WRITING; // even though another worker writes?
    }

    e.ping_status = PING_NONE;

    // a cached entry must look public and validated to the core
    EBIT_CLR(e.flags, RELEASE_REQUEST);
    e.clearPrivate();
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    e.swap_dirn = index;
    e.swap_filen = filen;
}
146
/// Severs the association between StoreEntry e and this cache_dir,
/// releasing the map lock that e holds (write lock if we were writing it,
/// read lock otherwise).
void Rock::SwapDir::disconnect(StoreEntry &e)
{
    assert(e.swap_dirn == index);
    assert(e.swap_filen >= 0);
    // cannot have SWAPOUT_NONE entry with swap_filen >= 0
    assert(e.swap_status != SWAPOUT_NONE);

    // do not rely on e.swap_status here because there is an async delay
    // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.

    // since e has swap_filen, its slot is locked for reading and/or writing
    // but it is difficult to know whether THIS worker is reading or writing e,
    // especially since we may switch from writing to reading. This code relies
    // on Rock::IoState::writeableAnchor_ being set when we locked for writing.
    if (e.mem_obj && e.mem_obj->swapout.sio != NULL &&
            dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
        map->abortWriting(e.swap_filen);
        e.swap_dirn = -1;
        e.swap_filen = -1;
        e.swap_status = SWAPOUT_NONE;
        dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_ = NULL;
        Store::Root().transientsAbandon(e); // broadcasts after the change
    } else {
        map->closeForReading(e.swap_filen);
        e.swap_dirn = -1;
        e.swap_filen = -1;
        e.swap_status = SWAPOUT_NONE;
    }
}
176
39c1e1d9
DK
177uint64_t
178Rock::SwapDir::currentSize() const
179{
50dc81ec 180 const uint64_t spaceSize = !freeSlots ?
9d4e9cfb 181 maxSize() : (slotSize * freeSlots->size());
e51ce7da
AR
182 // everything that is not free is in use
183 return maxSize() - spaceSize;
39c1e1d9
DK
184}
185
186uint64_t
187Rock::SwapDir::currentCount() const
188{
189 return map ? map->entryCount() : 0;
190}
191
192/// In SMP mode only the disker process reports stats to avoid
193/// counting the same stats by multiple processes.
194bool
195Rock::SwapDir::doReportStat() const
196{
197 return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
198}
199
void
Rock::SwapDir::swappedOut(const StoreEntry &)
{
    // stats are not stored but computed when needed
    // (see currentSize()/currentCount(), which derive them from shared state)
}
205
/// Hard upper bound on the number of slots any rock cache_dir may have,
/// dictated by the SlotId/StoreMapSliceId type capacity.
int64_t
Rock::SwapDir::slotLimitAbsolute() const
{
    // the max value is an invalid one; all values must be below the limit
    assert(std::numeric_limits<Ipc::StoreMapSliceId>::max() ==
           std::numeric_limits<SlotId>::max());
    return std::numeric_limits<SlotId>::max();
}
214
/// Slot count this cache_dir will actually use, derived from the configured
/// capacity and slot size, clamped by existing map size and the absolute cap.
int64_t
Rock::SwapDir::slotLimitActual() const
{
    const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
    const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
    const int64_t sLimitHi = slotLimitAbsolute();
    return min(max(sLimitLo, sWanted), sLimitHi);
}
223
224int64_t
225Rock::SwapDir::entryLimitActual() const
226{
227 return min(slotLimitActual(), entryLimitAbsolute());
b3165da6
DK
228}
229
// TODO: encapsulate as a tool
/// Creates the on-disk db file (and its directory if needed). In SMP mode,
/// only the disker process does the actual work. An existing db is kept.
void
Rock::SwapDir::create()
{
    assert(path);
    assert(filePath);

    if (UsingSmp() && !IamDiskProcess()) {
        debugs (47,3, HERE << "disker will create in " << path);
        return;
    }

    debugs (47,3, HERE << "creating in " << path);

    struct stat dir_sb;
    if (::stat(path, &dir_sb) == 0) {
        struct stat file_sb;
        if (::stat(filePath, &file_sb) == 0) {
            debugs (47, DBG_IMPORTANT, "Skipping existing Rock db: " << filePath);
            return;
        }
        // else the db file is not there or is not accessible, and we will try
        // to create it later below, generating a detailed error on failures.
    } else { // path does not exist or is inaccessible
        // If path exists but is not accessible, mkdir() below will fail, and
        // the admin should see the error and act accordingly, so there is
        // no need to distinguish ENOENT from other possible stat() errors.
        debugs (47, DBG_IMPORTANT, "Creating Rock db directory: " << path);
        const int res = mkdir(path, 0700);
        if (res != 0)
            createError("mkdir");
    }

    debugs (47, DBG_IMPORTANT, "Creating Rock db: " << filePath);
    const int swap = open(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
    if (swap < 0)
        createError("create");

#if SLOWLY_FILL_WITH_ZEROS
    // write the whole db out in small blocks; slow but fully allocates space
    char block[1024];
    Must(maxSize() % sizeof(block) == 0);
    memset(block, '\0', sizeof(block));

    for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
        if (write(swap, block, sizeof(block)) != sizeof(block))
            createError("write");
    }
#else
    // fast path: size the file with ftruncate() and write only the header
    if (ftruncate(swap, maxSize()) != 0)
        createError("truncate");

    char header[HeaderSize];
    memset(header, '\0', sizeof(header));
    if (write(swap, header, sizeof(header)) != sizeof(header))
        createError("write");
#endif

    close(swap);
}
289
// report Rock DB creation error and exit
/// \param msg the name of the failed operation (e.g., "mkdir", "write"),
/// reported together with the errno set by that failed system call
void
Rock::SwapDir::createError(const char *const msg)
{
    int xerrno = errno; // XXX: where does errno come from?
    debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
           filePath << "; " << msg << " error: " << xstrerr(xerrno));
    fatal("Rock Store db creation error"); // does not return
}
299
/// Late initialization: attaches shared memory segments (free-slot stack and
/// inode map), selects and starts a DiskIO strategy, and opens the db file.
/// Marks the rebuild as started; Rebuild is kicked off later via
/// ioCompletedNotification().
void
Rock::SwapDir::init()
{
    debugs(47,2, HERE);

    // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
    // are refcounted. We up our count once to avoid implicit delete's.
    lock();

    freeSlots = shm_old(Ipc::Mem::PageStack)(freeSlotsPath());

    Must(!map);
    map = new DirMap(inodeMapPath());
    map->cleaner = this; // we get noteFreeMapSlice() callbacks

    // SMP setups use the IpcIo strategy (disker); others can block
    const char *ioModule = needsDiskStrand() ? "IpcIo" : "Blocking";
    if (DiskIOModule *m = DiskIOModule::Find(ioModule)) {
        debugs(47,2, HERE << "Using DiskIO module: " << ioModule);
        io = m->createStrategy();
        io->init();
    } else {
        debugs(47, DBG_CRITICAL, "FATAL: Rock store is missing DiskIO module: " <<
               ioModule);
        fatal("Rock Store missing a required DiskIO module");
    }

    theFile = io->newFile(filePath);
    theFile->configure(fileConfig);
    theFile->open(O_RDWR, 0644, this); // we get ioCompletedNotification()

    // Increment early. Otherwise, if one SwapDir finishes rebuild before
    // others start, storeRebuildComplete() will think the rebuild is over!
    // TODO: move store_dirs_rebuilding hack to store modules that need it.
    ++StoreController::store_dirs_rebuilding;
}
335
14911a4e
AR
336bool
337Rock::SwapDir::needsDiskStrand() const
338{
3b581957
DK
339 const bool wontEvenWorkWithoutDisker = Config.workers > 1;
340 const bool wouldWorkBetterWithDisker = DiskIOModule::Find("IpcIo");
341 return InDaemonMode() && (wontEvenWorkWithoutDisker ||
e29ccb57 342 wouldWorkBetterWithDisker);
14911a4e
AR
343}
344
/// Parses the "cache_dir rock" configuration line for this directory:
/// records the path, derives the db file name (<path>/rock), and parses
/// the size and remaining options.
void
Rock::SwapDir::parse(int anIndex, char *aPath)
{
    index = anIndex;

    path = xstrdup(aPath);

    // cache store is located at path/db
    String fname(path);
    fname.append("/rock");
    filePath = xstrdup(fname.termedBuf());

    parseSize(false); // false: initial configuration, not reconfiguration
    parseOptions(0);

    // Current openForWriting() code overwrites the old slot if needed
    // and possible, so proactively removing old slots is probably useless.
    assert(!repl); // repl = createRemovalPolicy(Config.replPolicy);

    validateOptions();
}
366
/// Handles "squid -k reconfigure": re-parses size and options; parsers warn
/// about (and ignore) values that cannot change dynamically.
void
Rock::SwapDir::reconfigure()
{
    parseSize(true); // true: reconfiguration, warn instead of applying
    parseOptions(1);
    // TODO: can we reconfigure the replacement policy (repl)?
    validateOptions();
}
375
/// parse maximum db disk size
/// \param reconfig when true, a changed size is rejected with a warning
/// because the db capacity cannot change dynamically
void
Rock::SwapDir::parseSize(const bool reconfig)
{
    const int i = GetInteger();
    if (i < 0)
        fatal("negative Rock cache_dir size value");
    const uint64_t new_max_size =
        static_cast<uint64_t>(i) << 20; // MBytes to Bytes
    if (!reconfig)
        max_size = new_max_size;
    else if (new_max_size != max_size) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir '" << path << "' size "
               "cannot be changed dynamically, value left unchanged (" <<
               (max_size >> 20) << " MB)");
    }
}
393
/// Extends the base-class option parser vector with rock-specific options
/// (slot-size, swap-timeout, max-swap-rate). Caller owns the returned tree.
ConfigOption *
Rock::SwapDir::getOptionTree() const
{
    ConfigOption *copt = ::SwapDir::getOptionTree();
    ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(copt);
    if (vector) {
        // if copt is actually a ConfigOptionVector
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
    } else {
        // we don't know how to handle copt, as it's not a ConfigOptionVector.
        // free it (and return nullptr)
        delete copt;
        copt = nullptr;
    }
    return copt;
}
412
24063512
DK
413bool
414Rock::SwapDir::allowOptionReconfigure(const char *const option) const
415{
e51ce7da 416 return strcmp(option, "slot-size") != 0 &&
16fea83b 417 ::SwapDir::allowOptionReconfigure(option);
24063512
DK
418}
419
/// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
/// \returns false when the option is not time-specific (so other parsers
/// may try it); calls self_destruct() on invalid values
bool
Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfig)
{
    // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
    // including time unit handling. Same for size and rate.

    time_msec_t *storedTime;
    if (strcmp(option, "swap-timeout") == 0)
        storedTime = &fileConfig.ioTimeout;
    else
        return false;

    if (!value) {
        self_destruct();
        return false;
    }

    // TODO: handle time units and detect parsing errors better
    const int64_t parsedValue = strtoll(value, NULL, 10);
    if (parsedValue < 0) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
        self_destruct();
        return false;
    }

    const time_msec_t newTime = static_cast<time_msec_t>(parsedValue);

    // time options cannot change dynamically; warn on reconfigure
    if (!reconfig)
        *storedTime = newTime;
    else if (*storedTime != newTime) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
               << " cannot be changed dynamically, value left unchanged: " <<
               *storedTime);
    }

    return true;
}
458
459/// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
460void
461Rock::SwapDir::dumpTimeOption(StoreEntry * e) const
462{
463 if (fileConfig.ioTimeout)
c91ca3ce 464 storeAppendPrintf(e, " swap-timeout=%" PRId64,
43ebbac3
AR
465 static_cast<int64_t>(fileConfig.ioTimeout));
466}
467
df881a0f
AR
468/// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
469bool
470Rock::SwapDir::parseRateOption(char const *option, const char *value, int isaReconfig)
471{
472 int *storedRate;
473 if (strcmp(option, "max-swap-rate") == 0)
474 storedRate = &fileConfig.ioRate;
475 else
476 return false;
477
311ebdcc 478 if (!value) {
df881a0f 479 self_destruct();
72f7713d 480 return false;
311ebdcc 481 }
df881a0f
AR
482
483 // TODO: handle time units and detect parsing errors better
484 const int64_t parsedValue = strtoll(value, NULL, 10);
485 if (parsedValue < 0) {
486 debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
487 self_destruct();
72f7713d 488 return false;
df881a0f
AR
489 }
490
491 const int newRate = static_cast<int>(parsedValue);
492
493 if (newRate < 0) {
494 debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << newRate);
495 self_destruct();
72f7713d 496 return false;
df881a0f
AR
497 }
498
7846d084
DK
499 if (!isaReconfig)
500 *storedRate = newRate;
501 else if (*storedRate != newRate) {
502 debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
503 << " cannot be changed dynamically, value left unchanged: " <<
504 *storedRate);
505 }
df881a0f
AR
506
507 return true;
508}
509
510/// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
511void
512Rock::SwapDir::dumpRateOption(StoreEntry * e) const
513{
514 if (fileConfig.ioRate >= 0)
515 storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
516}
517
e51ce7da
AR
518/// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
519bool
a57a662c 520Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfig)
e51ce7da
AR
521{
522 uint64_t *storedSize;
523 if (strcmp(option, "slot-size") == 0)
524 storedSize = &slotSize;
525 else
526 return false;
527
311ebdcc 528 if (!value) {
e51ce7da 529 self_destruct();
72f7713d 530 return false;
311ebdcc 531 }
e51ce7da
AR
532
533 // TODO: handle size units and detect parsing errors better
534 const uint64_t newSize = strtoll(value, NULL, 10);
535 if (newSize <= 0) {
536 debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
537 self_destruct();
72f7713d 538 return false;
e51ce7da
AR
539 }
540
541 if (newSize <= sizeof(DbCellHeader)) {
542 debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
543 self_destruct();
72f7713d 544 return false;
e51ce7da
AR
545 }
546
a57a662c 547 if (!reconfig)
e51ce7da
AR
548 *storedSize = newSize;
549 else if (*storedSize != newSize) {
550 debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
551 << " cannot be changed dynamically, value left unchanged: " <<
552 *storedSize);
553 }
554
555 return true;
556}
557
558/// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
559void
560Rock::SwapDir::dumpSizeOption(StoreEntry * e) const
561{
562 storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
563}
564
e2851fe7
AR
565/// check the results of the configuration; only level-0 debugging works here
566void
567Rock::SwapDir::validateOptions()
568{
e51ce7da
AR
569 if (slotSize <= 0)
570 fatal("Rock store requires a positive slot-size");
e2851fe7 571
9dc492d0 572 const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
e51ce7da 573 const int64_t slotSizeRoundingWaste = slotSize;
9dc492d0 574 const int64_t maxRoundingWaste =
e51ce7da 575 max(maxSizeRoundingWaste, slotSizeRoundingWaste);
36c84e19
AR
576
577 // an entry consumes at least one slot; round up to reduce false warnings
578 const int64_t blockSize = static_cast<int64_t>(slotSize);
579 const int64_t maxObjSize = max(blockSize,
580 ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);
581
582 // Does the "sfileno*max-size" limit match configured db capacity?
583 const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
584 if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
585 const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
586 debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
587 "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
588 "\n\tconfigured db slot size: " << slotSize << " bytes" <<
589 "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
590 "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
591 "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
592 "\n\tdisk space wasted: " << diskWasteSize << " bytes");
593 }
594
595 // Does the "absolute slot count" limit match configured db capacity?
596 const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
597 if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
598 const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
599 debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
600 "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
601 "\n\tconfigured db slot size: " << slotSize << " bytes" <<
602 "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
603 "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
604 "\n\tdisk space wasted: " << diskWasteSize << " bytes");
9199139f 605 }
e2851fe7
AR
606}
607
/// Starts the asynchronous index rebuild from the on-disk db contents.
void
Rock::SwapDir::rebuild()
{
    //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init()
    AsyncJob::Start(new Rebuild(this));
}
614
/// Whether this cache_dir can accept a new entry of the given size;
/// on success, also reports the current I/O load via `load`.
bool
Rock::SwapDir::canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const
{
    // account for our per-slot bookkeeping overhead
    if (diskSpaceNeeded >= 0)
        diskSpaceNeeded += sizeof(DbCellHeader);
    if (!::SwapDir::canStore(e, diskSpaceNeeded, load))
        return false;

    if (!theFile || !theFile->canWrite())
        return false;

    if (!map)
        return false;

    // Do not start I/O transaction if there are less than 10% free pages left.
    // TODO: reserve page instead
    if (needsDiskStrand() &&
            Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
        debugs(47, 5, HERE << "too few shared pages for IPC I/O left");
        return false;
    }

    if (io->shedLoad())
        return false;

    load = io->load();
    return true;
}
643
/// Creates an I/O state for writing a new entry to disk: locks a fresh map
/// slot for writing and returns a writeable IoState (or nil on failure).
StoreIOState::Pointer
Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, HERE << theFile);
        return NULL;
    }

    sfileno filen;
    Ipc::StoreMapAnchor *const slot =
        map->openForWriting(reinterpret_cast<const cache_key *>(e.key), filen);
    if (!slot) {
        debugs(47, 5, HERE << "map->add failed");
        return NULL;
    }

    assert(filen >= 0);
    slot->set(e); // record entry basics in the shared anchor

    // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
    // If that does not happen, the entry will not decrement the read level!

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, &e, cbFile, cbIo, data);

    sio->swap_dirn = index;
    sio->swap_filen = filen;
    sio->writeableAnchor_ = slot;

    debugs(47,5, HERE << "dir " << index << " created new filen " <<
           std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
           sio->swap_filen << std::dec << " starting at " <<
           diskOffset(sio->swap_filen));

    sio->file(theFile);

    trackReferences(e);
    return sio;
}
683
/// Creates an I/O state for rewriting an existing entry's headers in place;
/// the "fresh" map anchor/slot were already locked by the caller's update.
StoreIOState::Pointer
Rock::SwapDir::createUpdateIO(const Ipc::StoreMapUpdate &update, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, theFile);
        return nullptr;
    }

    Must(update.fresh);
    Must(update.fresh.fileNo >= 0);

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, update.entry, cbFile, cbIo, data);

    sio->swap_dirn = index;
    sio->swap_filen = update.fresh.fileNo;
    sio->writeableAnchor_ = update.fresh.anchor;

    debugs(47,5, "dir " << index << " updating filen " <<
           std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
           sio->swap_filen << std::dec << " starting at " <<
           diskOffset(sio->swap_filen));

    sio->file(theFile);
    return sio;
}
710
e2851fe7 711int64_t
36c84e19 712Rock::SwapDir::diskOffset(const SlotId sid) const
e2851fe7 713{
36c84e19
AR
714 assert(sid >= 0);
715 return HeaderSize + slotSize*sid;
93910d5c
AR
716}
717
718int64_t
719Rock::SwapDir::diskOffset(Ipc::Mem::PageId &pageId) const
720{
721 assert(pageId);
722 return diskOffset(pageId.number - 1);
e2851fe7
AR
723}
724
725int64_t
726Rock::SwapDir::diskOffsetLimit() const
727{
c728b6f9 728 assert(map);
36c84e19 729 return diskOffset(map->sliceLimit());
93910d5c
AR
730}
731
/// Obtains a free db slot for writing, either from the free-slot stack or
/// by purging an old entry. Returns false when no slot can be obtained.
bool
Rock::SwapDir::useFreeSlot(Ipc::Mem::PageId &pageId)
{
    if (freeSlots->pop(pageId)) {
        debugs(47, 5, "got a previously free slot: " << pageId);
        return true;
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingForPage);
    waitingForPage = &pageId;
    if (map->purgeOne()) {
        assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
        assert(pageId.set());
        debugs(47, 5, "got a previously busy slot: " << pageId);
        return true;
    }
    assert(waitingForPage == &pageId);
    waitingForPage = NULL; // nothing was purged; stop waiting

    debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
    return false;
}
755
50dc81ec
AR
756bool
757Rock::SwapDir::validSlotId(const SlotId slotId) const
93910d5c 758{
36c84e19 759 return 0 <= slotId && slotId < slotLimitActual();
93910d5c
AR
760}
761
/// StoreMap callback: a map slice was freed. Either hands the slice to a
/// useFreeSlot() caller that is currently waiting for one, or returns it
/// to the shared free-slot stack.
void
Rock::SwapDir::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId pageId;
    pageId.pool = index+1;      // pool IDs are 1-based per cache_dir
    pageId.number = sliceId+1;  // page numbers are 1-based, slice IDs 0-based
    if (waitingForPage) {
        *waitingForPage = pageId;
        waitingForPage = NULL;
    } else {
        freeSlots->push(pageId);
    }
}
775
// tries to open an old entry with swap_filen for reading
StoreIOState::Pointer
Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, HERE << theFile);
        return NULL;
    }

    if (e.swap_filen < 0) {
        debugs(47,4, HERE << e);
        return NULL;
    }

    // Do not start I/O transaction if there are less than 10% free pages left.
    // TODO: reserve page instead
    if (needsDiskStrand() &&
            Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
        debugs(47, 5, HERE << "too few shared pages for IPC I/O left");
        return NULL;
    }

    // The are two ways an entry can get swap_filen: our get() locked it for
    // reading or our storeSwapOutStart() locked it for writing. Peeking at our
    // locked entry is safe, but no support for reading the entry we swap out.
    const Ipc::StoreMapAnchor *slot = map->peekAtReader(e.swap_filen);
    if (!slot)
        return NULL; // we were writing afterall

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, &e, cbFile, cbIo, data);

    sio->swap_dirn = index;
    sio->swap_filen = e.swap_filen;
    sio->readableAnchor_ = slot;
    sio->file(theFile);

    debugs(47,5, HERE << "dir " << index << " has old filen: " <<
           std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
           sio->swap_filen);

    // sanity: the locked slot must belong to the entry we are opening
    assert(slot->sameKey(static_cast<const cache_key*>(e.key)));
    // For collapsed disk hits: e.swap_file_sz and slot->basics.swap_file_sz
    // may still be zero and basics.swap_file_sz may grow.
    assert(slot->basics.swap_file_sz >= e.swap_file_sz);

    return sio;
}
824
/// DiskFile callback: the db file finished opening (or failed to).
/// Aborts Squid on failure; otherwise logs limits and starts the rebuild.
void
Rock::SwapDir::ioCompletedNotification()
{
    if (!theFile)
        fatalf("Rock cache_dir failed to initialize db file: %s", filePath);

    if (theFile->error()) {
        int xerrno = errno; // XXX: where does errno come from
        fatalf("Rock cache_dir at %s failed to open db file: %s", filePath,
               xstrerr(xerrno));
    }

    debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
           std::setw(12) << maxSize() << " disk bytes, " <<
           std::setw(7) << map->entryLimit() << " entries, and " <<
           std::setw(7) << map->sliceLimit() << " slots");

    rebuild();
}
844
845void
846Rock::SwapDir::closeCompleted()
847{
848 theFile = NULL;
849}
850
/// DiskFile callback: a previously scheduled read has finished (or failed);
/// forwards the result to the owning IoState.
void
Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::ReadRequest> r)
{
    ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
    assert(request);
    IoState::Pointer sio = request->sio;

    // advance the I/O position only on a successful, non-empty read
    if (errflag == DISK_OK && rlen > 0)
        sio->offset_ += rlen;

    sio->callReaderBack(r->buf, rlen);
}
863
/// DiskFile callback: a previously scheduled slot write has finished (or
/// failed). On success, publishes the written slice in the shared map and,
/// at EOF, finalizes the entry. On failure, frees the entry and propagates
/// the error to the IoState.
void
Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
{
    Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
    assert(request);
    assert(request->sio !=  NULL);
    IoState &sio = *request->sio;

    // quit if somebody called IoState::close() while we were waiting
    if (!sio.stillWaiting()) {
        debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
        noteFreeMapSlice(request->sidNext); // reclaim the pre-allocated slot
        return;
    }

    debugs(79, 7, "errflag=" << errflag << " rlen=" << request->len << " eof=" << request->eof);

    // TODO: Fail if disk dropped one of the previous write requests.

    if (errflag == DISK_OK) {
        // do not increment sio.offset_ because we do it in sio->write()

        // finalize the shared slice info after writing slice contents to disk
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(sio.swap_filen, request->sidCurrent);
        slice.size = request->len - sizeof(DbCellHeader);
        slice.next = request->sidNext;

        if (request->eof) {
            assert(sio.e);
            assert(sio.writeableAnchor_);
            if (sio.touchingStoreEntry()) {
                // record the final on-disk size in both local and shared state
                sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
                                          sio.offset_;

                // close, the entry gets the read lock
                map->closeForWriting(sio.swap_filen, true);
            }
            sio.writeableAnchor_ = NULL;
            sio.splicingPoint = request->sidCurrent;
            sio.finishedWriting(errflag);
        }
    } else {
        // the pre-allocated continuation slot will not be used
        noteFreeMapSlice(request->sidNext);

        writeError(sio);
        sio.finishedWriting(errflag);
        // and hope that Core will call disconnect() to close the map entry
    }

    // tell collapsed-forwarding readers that more data may be available
    if (sio.touchingStoreEntry())
        CollapsedForwarding::Broadcast(*sio.e);
}
e2851fe7 917
93910d5c 918void
abf396ec 919Rock::SwapDir::writeError(StoreIOState &sio)
93910d5c
AR
920{
921 // Do not abortWriting here. The entry should keep the write lock
922 // instead of losing association with the store and confusing core.
abf396ec 923 map->freeEntry(sio.swap_filen); // will mark as unusable, just in case
4475555f 924
abf396ec
AR
925 if (sio.touchingStoreEntry())
926 Store::Root().transientsAbandon(*sio.e);
927 // else noop: a fresh entry update error does not affect stale entry readers
4475555f 928
50dc81ec 929 // All callers must also call IoState callback, to propagate the error.
e2851fe7
AR
930}
931
abf396ec
AR
932void
933Rock::SwapDir::updateHeaders(StoreEntry *updatedE)
934{
935 if (!map)
936 return;
937
938 Ipc::StoreMapUpdate update(updatedE);
939 if (!map->openForUpdating(update, updatedE->swap_filen))
940 return;
941
942 try {
943 AsyncJob::Start(new HeaderUpdater(this, update));
944 } catch (const std::exception &ex) {
945 debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
946 map->abortUpdating(update);
947 }
948}
949
e2851fe7
AR
950bool
951Rock::SwapDir::full() const
952{
50dc81ec 953 return freeSlots != NULL && !freeSlots->size();
e2851fe7
AR
954}
955
e2851fe7
AR
956// storeSwapOutFileClosed calls this nethod on DISK_NO_SPACE_LEFT,
957// but it should not happen for us
958void
9199139f
AR
959Rock::SwapDir::diskFull()
960{
f5adb654
AR
961 debugs(20, DBG_IMPORTANT, "BUG: No space left with rock cache_dir: " <<
962 filePath);
e2851fe7
AR
963}
964
965/// purge while full(); it should be sufficient to purge just one
966void
967Rock::SwapDir::maintain()
968{
50dc81ec
AR
969 // The Store calls this to free some db space, but there is nothing wrong
970 // with a full() db, except when db has to shrink after reconfigure, and
971 // we do not support shrinking yet (it would have to purge specific slots).
972 // TODO: Disable maintain() requests when they are pointless.
e2851fe7
AR
973}
974
975void
976Rock::SwapDir::reference(StoreEntry &e)
977{
978 debugs(47, 5, HERE << &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
0e240235 979 if (repl && repl->Referenced)
e2851fe7
AR
980 repl->Referenced(repl, &e, &e.repl);
981}
982
4c973beb 983bool
2745fea5 984Rock::SwapDir::dereference(StoreEntry &e)
e2851fe7
AR
985{
986 debugs(47, 5, HERE << &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
0e240235 987 if (repl && repl->Dereferenced)
e2851fe7 988 repl->Dereferenced(repl, &e, &e.repl);
4c973beb
AR
989
990 // no need to keep e in the global store_table for us; we have our own map
991 return false;
e2851fe7
AR
992}
993
c521ad17
DK
994bool
995Rock::SwapDir::unlinkdUseful() const
996{
997 // no entry-specific files to unlink
998 return false;
999}
1000
e2851fe7
AR
1001void
1002Rock::SwapDir::unlink(StoreEntry &e)
1003{
f58bb2f4 1004 debugs(47, 5, HERE << e);
e2851fe7 1005 ignoreReferences(e);
50dc81ec 1006 map->freeEntry(e.swap_filen);
f58bb2f4 1007 disconnect(e);
e2851fe7
AR
1008}
1009
1bfe9ade
AR
1010void
1011Rock::SwapDir::markForUnlink(StoreEntry &e)
1012{
1013 debugs(47, 5, e);
1014 map->freeEntry(e.swap_filen);
1015}
1016
e2851fe7
AR
1017void
1018Rock::SwapDir::trackReferences(StoreEntry &e)
1019{
f58bb2f4 1020 debugs(47, 5, HERE << e);
0e240235
AR
1021 if (repl)
1022 repl->Add(repl, &e, &e.repl);
e2851fe7
AR
1023}
1024
e2851fe7
AR
1025void
1026Rock::SwapDir::ignoreReferences(StoreEntry &e)
1027{
f58bb2f4 1028 debugs(47, 5, HERE << e);
0e240235
AR
1029 if (repl)
1030 repl->Remove(repl, &e, &e.repl);
e2851fe7
AR
1031}
1032
1033void
1034Rock::SwapDir::statfs(StoreEntry &e) const
1035{
1036 storeAppendPrintf(&e, "\n");
c91ca3ce 1037 storeAppendPrintf(&e, "Maximum Size: %" PRIu64 " KB\n", maxSize() >> 10);
57f583f1 1038 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
cc34568d
DK
1039 currentSize() / 1024.0,
1040 Math::doublePercent(currentSize(), maxSize()));
e2851fe7 1041
36c84e19
AR
1042 const int entryLimit = entryLimitActual();
1043 const int slotLimit = slotLimitActual();
1044 storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
1045 if (map && entryLimit > 0) {
2da4bfe6
A
1046 const int entryCount = map->entryCount();
1047 storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
1048 entryCount, (100.0 * entryCount / entryLimit));
36c84e19 1049 }
c728b6f9 1050
36c84e19
AR
1051 storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
1052 if (map && slotLimit > 0) {
2da4bfe6
A
1053 const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
1054 if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
1055 const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
1056 storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
1057 usedSlots, (100.0 * usedSlots / slotLimit));
1058 }
1059 if (slotLimit < 100) { // XXX: otherwise too expensive to count
1060 Ipc::ReadWriteLockStats stats;
1061 map->updateStats(stats);
1062 stats.dump(e);
1063 }
9199139f 1064 }
e2851fe7
AR
1065
1066 storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
9199139f 1067 store_open_disk_fd, Config.max_open_disk_fds);
e2851fe7
AR
1068
1069 storeAppendPrintf(&e, "Flags:");
1070
1071 if (flags.selected)
1072 storeAppendPrintf(&e, " SELECTED");
1073
1074 if (flags.read_only)
1075 storeAppendPrintf(&e, " READ-ONLY");
1076
1077 storeAppendPrintf(&e, "\n");
1078
1079}
902df398 1080
1860fbac 1081SBuf
9d4e9cfb
AR
1082Rock::SwapDir::inodeMapPath() const
1083{
1860fbac 1084 return Ipc::Mem::Segment::Name(SBuf(path), "map");
300fd297
AR
1085}
1086
1087const char *
9d4e9cfb
AR
1088Rock::SwapDir::freeSlotsPath() const
1089{
300fd297
AR
1090 static String spacesPath;
1091 spacesPath = path;
1092 spacesPath.append("_spaces");
1093 return spacesPath.termedBuf();
1094}
1095
namespace Rock
{
// register the SwapDirRr runner so its create() and destructor hooks run
RunnerRegistrationEntry(SwapDirRr);
}
902df398 1100
/// creates the shared-memory segments (entry map and free-slot page stack)
/// for every configured rock cache_dir
void Rock::SwapDirRr::create()
{
    Must(mapOwners.empty() && freeSlotsOwners.empty());
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        // only rock cache_dirs need these segments
        if (const Rock::SwapDir *const sd = dynamic_cast<Rock::SwapDir *>(INDEXSD(i))) {
            const int64_t capacity = sd->slotLimitActual();

            SwapDir::DirMap::Owner *const mapOwner =
                SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
            mapOwners.push_back(mapOwner);

            // TODO: somehow remove pool id and counters from PageStack?
            // pool id i+1 keeps each cache_dir stack distinct
            Ipc::Mem::Owner<Ipc::Mem::PageStack> *const freeSlotsOwner =
                shm_new(Ipc::Mem::PageStack)(sd->freeSlotsPath(),
                                             i+1, capacity, 0);
            freeSlotsOwners.push_back(freeSlotsOwner);

            // TODO: add method to initialize PageStack with no free pages
            // pop every page so the stack starts with no free slots
            while (true) {
                Ipc::Mem::PageId pageId;
                if (!freeSlotsOwner->object()->pop(pageId))
                    break;
            }
        }
    }
}
1127
9bb01611 1128Rock::SwapDirRr::~SwapDirRr()
902df398 1129{
93910d5c
AR
1130 for (size_t i = 0; i < mapOwners.size(); ++i) {
1131 delete mapOwners[i];
50dc81ec 1132 delete freeSlotsOwners[i];
93910d5c 1133 }
902df398 1134}
f53969cc 1135