/*
 * $Id$
 *
 * DEBUG: section 79    Disk IO Routines
 */

#include "config.h"
#include "fs/rock/RockRebuild.h"
#include "fs/rock/RockSwapDir.h"
#include "fs/rock/RockDbCell.h"
#include "SquidTime.h"

CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);

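/// caches the db geometry from the owning SwapDir: the scan limit (dbSize),
/// the fixed per-entry slot size (dbEntrySize), and the entry count limit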
Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
        sd(dir),
        dbSize(0),
        dbEntrySize(0),
        dbEntryLimit(0),
        fd(-1),
        dbOffset(0),
        filen(0)
{
    assert(sd);
    memset(&counts, 0, sizeof(counts));
    dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
    dbEntrySize = sd->max_objsize;
    dbEntryLimit = sd->entryLimit();
}

Rock::Rebuild::~Rebuild()
{
    if (fd >= 0)
        file_close(fd);
}

/// prepares and initiates entry loading sequence
void
Rock::Rebuild::start()
{
    // in SMP mode, only the disker is responsible for populating the map
    if (UsingSmp() && !IamDiskProcess()) {
        debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
               sd->index << " from " << sd->filePath);
        mustStop("non-disker");
        return;
    }

    debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
           " from " << sd->filePath);

    fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
    if (fd < 0)
        failure("cannot open db", errno);

    char buf[SwapDir::HeaderSize];
    if (read(fd, buf, sizeof(buf)) != SwapDir::HeaderSize)
        failure("cannot read db header", errno);

    dbOffset = SwapDir::HeaderSize;
    filen = 0;

    checkpoint();
}

/// continues after a pause if not done
void
Rock::Rebuild::checkpoint()
{
    if (!done())
        eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
}

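/// whether the whole db has been scanned (and the job has no other pending work)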
bool
Rock::Rebuild::doneAll() const
{
    return dbOffset >= dbSize && AsyncJob::doneAll();
}

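/// a static event callback that resumes the rebuild via an async job call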
void
Rock::Rebuild::Steps(void *data)
{
    // use async call to enable job call protection that time events lack
    CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
}

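/// loads a batch of entries, pausing periodically (unless rebuilding in the
/// foreground) so that Coordinator events, disk I/Os, etc. can be processed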
void
Rock::Rebuild::steps()
{
    debugs(47, 5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    // Balance our desire to maximize the number of entries processed at once
    // (and, hence, minimize overheads and total rebuild time) with a
    // requirement to also process Coordinator events, disk I/Os, etc.
    const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
    const timeval loopStart = current_time;

    int loaded = 0;
    while (loaded < dbEntryLimit && dbOffset < dbSize) {
        doOneEntry();
        dbOffset += dbEntrySize;
        ++filen;
        ++loaded;

        if (counts.scancount % 1000 == 0)
            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);

        if (opt_foreground_rebuild)
            continue; // skip "few entries at a time" check below

        getCurrentTime();
        const double elapsedMsec = tvSubMsec(loopStart, current_time);
        if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
            debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
                   elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
            break;
        }
    }

    checkpoint();
}

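/// loads the db cell at the current offset, validates its header and entry
/// metadata, and registers sane entries with the SwapDir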
void
Rock::Rebuild::doOneEntry()
{
    debugs(47, 5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    ++counts.scancount;

    if (lseek(fd, dbOffset, SEEK_SET) < 0)
        failure("cannot seek to db entry", errno);

    MemBuf buf;
    buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

    if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
        return;

    // get our header
    DbCellHeader header;
    if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring truncated cache entry meta data at " << dbOffset);
        counts.invalid++;
        return;
    }
    memcpy(&header, buf.content(), sizeof(header));

    if (!header.sane()) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring malformed cache entry meta data at " << dbOffset);
        counts.invalid++;
        return;
    }
    buf.consume(sizeof(header)); // optimize to avoid memmove()

    cache_key key[SQUID_MD5_DIGEST_LENGTH];
    StoreEntry loadedE;
    if (!storeRebuildParseEntry(buf, loadedE, key, counts, header.payloadSize)) {
        // skip empty slots
        if (loadedE.swap_filen > 0 || loadedE.swap_file_sz > 0) {
            counts.invalid++;
            //sd->unlink(filen); leave garbage on disk, it should not hurt
        }
        return;
    }

    assert(loadedE.swap_filen < dbEntryLimit);
    if (!storeRebuildKeepEntry(loadedE, key, counts))
        return;

    counts.objcount++;
    // loadedE->dump(5);

    sd->addEntry(filen, header, loadedE);
}

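/// job cleanup: marks this cache_dir as no longer rebuilding and reports
/// the accumulated rebuild counts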
void
Rock::Rebuild::swanSong()
{
    debugs(47, 3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
           StoreController::store_dirs_rebuilding);
    --StoreController::store_dirs_rebuilding;
    storeRebuildComplete(&counts);
}

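/// reports an unrecoverable db problem and terminates Squid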
void
Rock::Rebuild::failure(const char *msg, int errNo)
{
    debugs(47, 5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    if (errNo)
        debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
    debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");

    assert(sd);
    fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
           sd->index, sd->filePath, msg);
}