/*
 * DEBUG: section 79    Disk IO Routines
 */

#include "squid.h"
#include "disk.h"
#include "fs/rock/RockRebuild.h"
#include "fs/rock/RockSwapDir.h"
#include "fs/rock/RockDbCell.h"
#include "md5.h"
#include "protos.h"
#include "tools.h"
#include "typedefs.h"
#include "SquidTime.h"
#include "store_rebuild.h"

#if HAVE_ERRNO_H
#include <errno.h>
#endif

CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);

Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
        sd(dir),
        dbSize(0),
        dbEntrySize(0),
        dbEntryLimit(0),
        fd(-1),
        dbOffset(0),
        filen(0)
{
    assert(sd);
    memset(&counts, 0, sizeof(counts));
    dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
    dbEntrySize = sd->max_objsize;
    dbEntryLimit = sd->entryLimit();
}

Rock::Rebuild::~Rebuild()
{
    if (fd >= 0)
        file_close(fd);
}

/// prepares and initiates entry loading sequence
void
Rock::Rebuild::start()
{
    // in SMP mode, only the disker is responsible for populating the map
    if (UsingSmp() && !IamDiskProcess()) {
        debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
               sd->index << " from " << sd->filePath);
        mustStop("non-disker");
        return;
    }

    debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
           " from " << sd->filePath);

    fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
    if (fd < 0)
        failure("cannot open db", errno);

    char buf[SwapDir::HeaderSize];
    if (read(fd, buf, sizeof(buf)) != SwapDir::HeaderSize)
        failure("cannot read db header", errno);

    dbOffset = SwapDir::HeaderSize;
    filen = 0;

    checkpoint();
}

/// continues after a pause if not done
void
Rock::Rebuild::checkpoint()
{
    if (!done())
        eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
}

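/// the rebuild is over when the scan has reached the end of the db file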
bool
Rock::Rebuild::doneAll() const
{
    return dbOffset >= dbSize && AsyncJob::doneAll();
}

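/// static event callback that resumes the rebuild job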
void
Rock::Rebuild::Steps(void *data)
{
    // use async call to enable job call protection that time events lack
    CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
}

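/// scans a batch of db entries, pausing to let other activities run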
void
Rock::Rebuild::steps()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    // Balance our desire to maximize the number of entries processed at once
    // (and, hence, minimize overheads and total rebuild time) with a
    // requirement to also process Coordinator events, disk I/Os, etc.
    const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
    const timeval loopStart = current_time;

    int loaded = 0;
    while (loaded < dbEntryLimit && dbOffset < dbSize) {
        doOneEntry();
        dbOffset += dbEntrySize;
        ++filen;
        ++loaded;

        if (counts.scancount % 1000 == 0)
            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);

        if (opt_foreground_rebuild)
            continue; // skip "few entries at a time" check below

        getCurrentTime();
        const double elapsedMsec = tvSubMsec(loopStart, current_time);
        if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
            debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
                   elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
            break;
        }
    }

    checkpoint();
}

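/// loads and validates the db entry at the current offset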
void
Rock::Rebuild::doOneEntry()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    ++counts.scancount;

    if (lseek(fd, dbOffset, SEEK_SET) < 0)
        failure("cannot seek to db entry", errno);

    MemBuf buf;
    buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

    if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
        return;

    // get our header
    DbCellHeader header;
    if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring truncated cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    memcpy(&header, buf.content(), sizeof(header));

    if (!header.sane()) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring malformed cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    buf.consume(sizeof(header)); // optimize to avoid memmove()

    cache_key key[SQUID_MD5_DIGEST_LENGTH];
    StoreEntry loadedE;
    if (!storeRebuildParseEntry(buf, loadedE, key, counts, header.payloadSize)) {
        // skip empty slots
        if (loadedE.swap_filen > 0 || loadedE.swap_file_sz > 0) {
            ++counts.invalid;
            //sd->unlink(filen); leave garbage on disk, it should not hurt
        }
        return;
    }

    assert(loadedE.swap_filen < dbEntryLimit);
    if (!storeRebuildKeepEntry(loadedE, key, counts))
        return;

    ++counts.objcount;
    // loadedE->dump(5);

    sd->addEntry(filen, header, loadedE);
}

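/// reports completion statistics and lowers the global rebuild level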
void
Rock::Rebuild::swanSong()
{
    debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
           StoreController::store_dirs_rebuilding);
    --StoreController::store_dirs_rebuilding;
    storeRebuildComplete(&counts);
}

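/// reports a fatal rebuild error and terminates Squid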
void
Rock::Rebuild::failure(const char *msg, int errNo)
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    if (errNo)
        debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
    debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");

    assert(sd);
    fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
           sd->index, sd->filePath, msg);
}