/*
 * $Id$
 *
 * DEBUG: section 79    Disk IO Routines
 */

#include "squid.h"
#include "fs/rock/RockRebuild.h"
#include "fs/rock/RockSwapDir.h"
#include "fs/rock/RockDbCell.h"
#include "md5.h"
#include "protos.h"
#include "typedefs.h"
#include "SquidTime.h"

CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);

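/// captures the db geometry (total size, slot size, slot limit) from the SwapDir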
Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
        sd(dir),
        dbSize(0),
        dbEntrySize(0),
        dbEntryLimit(0),
        fd(-1),
        dbOffset(0),
        filen(0)
{
    assert(sd);
    memset(&counts, 0, sizeof(counts));
    dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
    dbEntrySize = sd->max_objsize;
    dbEntryLimit = sd->entryLimit();
}

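/// closes the db file if it was opened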
Rock::Rebuild::~Rebuild()
{
    if (fd >= 0)
        file_close(fd);
}

/// prepares and initiates entry loading sequence
void
Rock::Rebuild::start()
{
    // in SMP mode, only the disker is responsible for populating the map
    if (UsingSmp() && !IamDiskProcess()) {
        debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
               sd->index << " from " << sd->filePath);
        mustStop("non-disker");
        return;
    }

    debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
           " from " << sd->filePath);

    fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
    if (fd < 0)
        failure("cannot open db", errno);

    char buf[SwapDir::HeaderSize];
    if (read(fd, buf, sizeof(buf)) != SwapDir::HeaderSize)
        failure("cannot read db header", errno);

    dbOffset = SwapDir::HeaderSize;
    filen = 0;

    checkpoint();
}

/// continues after a pause if not done
void
Rock::Rebuild::checkpoint()
{
    if (!done())
        eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
}

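/// the job is done once the scan has covered the entire db file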
bool
Rock::Rebuild::doneAll() const
{
    return dbOffset >= dbSize && AsyncJob::doneAll();
}

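/// a static entry point for the event queue; resumes the rebuild job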
void
Rock::Rebuild::Steps(void *data)
{
    // use async call to enable job call protection that time events lack
    CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
}

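/// scans a batch of db slots, pausing to let other activities run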
void
Rock::Rebuild::steps()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    // Balance our desire to maximize the number of entries processed at once
    // (and, hence, minimize overheads and total rebuild time) with a
    // requirement to also process Coordinator events, disk I/Os, etc.
    const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
    const timeval loopStart = current_time;

    int loaded = 0;
    while (loaded < dbEntryLimit && dbOffset < dbSize) {
        doOneEntry();
        dbOffset += dbEntrySize;
        ++filen;
        ++loaded;

        if (counts.scancount % 1000 == 0)
            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);

        if (opt_foreground_rebuild)
            continue; // skip "few entries at a time" check below

        getCurrentTime();
        const double elapsedMsec = tvSubMsec(loopStart, current_time);
        if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
            debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
                   elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
            break;
        }
    }

    checkpoint();
}

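/// loads and validates one db slot, keeping sane entries and counting the rest as invalid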
void
Rock::Rebuild::doOneEntry()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    ++counts.scancount;

    if (lseek(fd, dbOffset, SEEK_SET) < 0)
        failure("cannot seek to db entry", errno);

    MemBuf buf;
    buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

    if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
        return;

    // get our header
    DbCellHeader header;
    if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring truncated cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    memcpy(&header, buf.content(), sizeof(header));

    if (!header.sane()) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring malformed cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    buf.consume(sizeof(header)); // optimize to avoid memmove()

    cache_key key[SQUID_MD5_DIGEST_LENGTH];
    StoreEntry loadedE;
    if (!storeRebuildParseEntry(buf, loadedE, key, counts, header.payloadSize)) {
        // skip empty slots
        if (loadedE.swap_filen > 0 || loadedE.swap_file_sz > 0) {
            ++counts.invalid;
            //sd->unlink(filen); leave garbage on disk, it should not hurt
        }
        return;
    }

    assert(loadedE.swap_filen < dbEntryLimit);
    if (!storeRebuildKeepEntry(loadedE, key, counts))
        return;

    ++counts.objcount;
    // loadedE->dump(5);

    sd->addEntry(filen, header, loadedE);
}

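/// updates global rebuild accounting when the job ends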
void
Rock::Rebuild::swanSong()
{
    debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
           StoreController::store_dirs_rebuilding);
    --StoreController::store_dirs_rebuilding;
    storeRebuildComplete(&counts);
}

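/// reports a fatal rebuild error and terminates Squid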
void
Rock::Rebuild::failure(const char *msg, int errNo)
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    if (errNo)
        debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
    debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");

    assert(sd);
    fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
           sd->index, sd->filePath, msg);
}