/*
 * $Id$
 *
 * DEBUG: section 79    Disk IO Routines
 */

#include "squid.h"
#include "fs/rock/RockRebuild.h"
#include "fs/rock/RockSwapDir.h"
#include "fs/rock/RockDbCell.h"
#include "md5.h"
#include "protos.h"
#include "typedefs.h"
#include "SquidTime.h"

#if HAVE_ERRNO_H
#include <errno.h>
#endif

CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);

Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
        sd(dir),
        dbSize(0),
        dbEntrySize(0),
        dbEntryLimit(0),
        fd(-1),
        dbOffset(0),
        filen(0)
{
    assert(sd);
    memset(&counts, 0, sizeof(counts));
    dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
    dbEntrySize = sd->max_objsize;
    dbEntryLimit = sd->entryLimit();
}

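/// closes the db file, if we opened it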
Rock::Rebuild::~Rebuild()
{
    if (fd >= 0)
        file_close(fd);
}

/// prepares and initiates entry loading sequence
void
Rock::Rebuild::start()
{
    // in SMP mode, only the disker is responsible for populating the map
    if (UsingSmp() && !IamDiskProcess()) {
        debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
               sd->index << " from " << sd->filePath);
        mustStop("non-disker");
        return;
    }

    debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
           " from " << sd->filePath);

    fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
    if (fd < 0)
        failure("cannot open db", errno);

    char buf[SwapDir::HeaderSize];
    if (read(fd, buf, sizeof(buf)) != SwapDir::HeaderSize)
        failure("cannot read db header", errno);

    dbOffset = SwapDir::HeaderSize;
    filen = 0;

    checkpoint();
}

/// continues after a pause if not done
void
Rock::Rebuild::checkpoint()
{
    if (!done())
        eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
}

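/// whether we have scanned the entire db (and the usual AsyncJob checks pass)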
bool
Rock::Rebuild::doneAll() const
{
    return dbOffset >= dbSize && AsyncJob::doneAll();
}

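/// an eventAdd() callback that resumes the paused rebuild job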
void
Rock::Rebuild::Steps(void *data)
{
    // use async call to enable job call protection that time events lack
    CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
}

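/// loads entries until the time budget is exhausted, then reschedules itself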
void
Rock::Rebuild::steps()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    // Balance our desire to maximize the number of entries processed at once
    // (and, hence, minimize overheads and total rebuild time) with a
    // requirement to also process Coordinator events, disk I/Os, etc.
    const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
    const timeval loopStart = current_time;

    int loaded = 0;
    while (loaded < dbEntryLimit && dbOffset < dbSize) {
        doOneEntry();
        dbOffset += dbEntrySize;
        ++filen;
        ++loaded;

        if (counts.scancount % 1000 == 0)
            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);

        if (opt_foreground_rebuild)
            continue; // skip "few entries at a time" check below

        getCurrentTime();
        const double elapsedMsec = tvSubMsec(loopStart, current_time);
        if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
            debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
                   elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
            break;
        }
    }

    checkpoint();
}

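/// loads, validates, and indexes a single db entry at the current offset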
void
Rock::Rebuild::doOneEntry()
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    ++counts.scancount;

    if (lseek(fd, dbOffset, SEEK_SET) < 0)
        failure("cannot seek to db entry", errno);

    MemBuf buf;
    buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

    if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
        return;

    // get our header
    DbCellHeader header;
    if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring truncated cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    memcpy(&header, buf.content(), sizeof(header));

    if (!header.sane()) {
        debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
               "Ignoring malformed cache entry meta data at " << dbOffset);
        ++counts.invalid;
        return;
    }
    buf.consume(sizeof(header)); // optimize to avoid memmove()

    cache_key key[SQUID_MD5_DIGEST_LENGTH];
    StoreEntry loadedE;
    if (!storeRebuildParseEntry(buf, loadedE, key, counts, header.payloadSize)) {
        // skip empty slots
        if (loadedE.swap_filen > 0 || loadedE.swap_file_sz > 0) {
            ++counts.invalid;
            //sd->unlink(filen); leave garbage on disk, it should not hurt
        }
        return;
    }

    assert(loadedE.swap_filen < dbEntryLimit);
    if (!storeRebuildKeepEntry(loadedE, key, counts))
        return;

    ++counts.objcount;
    // loadedE->dump(5);

    sd->addEntry(filen, header, loadedE);
}

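/// AsyncJob cleanup: updates the rebuild level and reports the results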
void
Rock::Rebuild::swanSong()
{
    debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
           StoreController::store_dirs_rebuilding);
    --StoreController::store_dirs_rebuilding;
    storeRebuildComplete(&counts);
}

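/// reports a fatal rebuild error and terminates Squid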
void
Rock::Rebuild::failure(const char *msg, int errNo)
{
    debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
           dbOffset << " <= " << dbSize);

    if (errNo)
        debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
    debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");

    assert(sd);
    fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
           sd->index, sd->filePath, msg);
}