/*
 * $Id$
 *
 * DEBUG: section 79    Disk IO Routines
 */

#include "squid.h"
#include "fs/rock/RockRebuild.h"
#include "fs/rock/RockSwapDir.h"
#include "fs/rock/RockDbCell.h"
#include "md5.h"
#include "protos.h"
#include "typedefs.h"
#include "SquidTime.h"

CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);

Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
        sd(dir),
        dbSize(0),
        dbEntrySize(0),
        dbEntryLimit(0),
        fd(-1),
        dbOffset(0),
        filen(0)
{
    assert(sd);
    memset(&counts, 0, sizeof(counts));
    dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
    dbEntrySize = sd->max_objsize;
    dbEntryLimit = sd->entryLimit();
}

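/// closes the db file, if it is still open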
Rock::Rebuild::~Rebuild()
{
    if (fd >= 0)
        file_close(fd);
}

/// prepares and initiates entry loading sequence
void
Rock::Rebuild::start()
{
    // in SMP mode, only the disker is responsible for populating the map
    if (UsingSmp() && !IamDiskProcess()) {
        debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
               sd->index << " from " << sd->filePath);
        mustStop("non-disker");
        return;
    }

    debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
           " from " << sd->filePath);

    fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
    if (fd < 0)
        failure("cannot open db", errno);

    char buf[SwapDir::HeaderSize];
    if (read(fd, buf, sizeof(buf)) != SwapDir::HeaderSize)
        failure("cannot read db header", errno);

    dbOffset = SwapDir::HeaderSize;
    filen = 0;

    checkpoint();
}

/// continues after a pause if not done
void
Rock::Rebuild::checkpoint()
{
    if (!done())
        eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
}

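/// whether we have scanned the entire database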
bool
Rock::Rebuild::doneAll() const
{
    return dbOffset >= dbSize && AsyncJob::doneAll();
}

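/// a static event handler that resumes the rebuild job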
void
Rock::Rebuild::Steps(void *data)
{
    // use async call to enable job call protection that time events lack
    CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
}

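/// a single rebuild step: loads entries until the time budget is exhausted
/// or the whole database has been scanned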
void
Rock::Rebuild::steps()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    // Balance our desire to maximize the number of entries processed at once
    // (and, hence, minimize overheads and total rebuild time) with a
    // requirement to also process Coordinator events, disk I/Os, etc.
    const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
    const timeval loopStart = current_time;

    int loaded = 0;
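    // scan one db slot per iteration; dbOffset tracks the current slot as
    // SwapDir::HeaderSize + filen * dbEntrySize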
    while (loaded < dbEntryLimit && dbOffset < dbSize) {
        doOneEntry();
        dbOffset += dbEntrySize;
        ++filen;
        ++loaded;

        if (counts.scancount % 1000 == 0)
            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);

        if (opt_foreground_rebuild)
            continue; // skip "few entries at a time" check below

        getCurrentTime();
        const double elapsedMsec = tvSubMsec(loopStart, current_time);
        if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
            debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
                   elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
            break;
        }
    }

    checkpoint();
}

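/// loads and validates the db cell at dbOffset, adding sane entries
/// via sd->addEntry()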
void
Rock::Rebuild::doOneEntry()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    ++counts.scancount;

    if (lseek(fd, dbOffset, SEEK_SET) < 0)
        failure("cannot seek to db entry", errno);

    MemBuf buf;
    buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

    if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
        return;

    // get our header
    DbCellHeader header;
    if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring truncated cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    memcpy(&header, buf.content(), sizeof(header));

    if (!header.sane()) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring malformed cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    buf.consume(sizeof(header)); // optimize to avoid memmove()

    cache_key key[SQUID_MD5_DIGEST_LENGTH];
    StoreEntry loadedE;
    if (!storeRebuildParseEntry(buf, loadedE, key, counts, header.payloadSize)) {
        // skip empty slots
        if (loadedE.swap_filen > 0 || loadedE.swap_file_sz > 0) {
            ++counts.invalid;
            //sd->unlink(filen); leave garbage on disk, it should not hurt
        }
        return;
    }

    assert(loadedE.swap_filen < dbEntryLimit);
    if (!storeRebuildKeepEntry(loadedE, key, counts))
        return;

    ++counts.objcount;
    // loadedE->dump(5);

    sd->addEntry(filen, header, loadedE);
}

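/// finalizes the rebuild: decrements the global rebuild counter and reports
/// the collected statistics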
void
Rock::Rebuild::swanSong()
{
    debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
           StoreController::store_dirs_rebuilding);
    --StoreController::store_dirs_rebuilding;
    storeRebuildComplete(&counts);
}

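/// reports the given error and terminates Squid via fatalf()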
void
Rock::Rebuild::failure(const char *msg, int errNo)
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    if (errNo)
        debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
    debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");

    assert(sd);
    fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
           sd->index, sd->filePath, msg);
}