/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 79    Disk IO Routines */

#include "squid.h"
#include "base/TextException.h"
#include "CollapsedForwarding.h"
#include "DiskIO/DiskIOModule.h"
#include "DiskIO/DiskIOStrategy.h"
#include "DiskIO/WriteRequest.h"
#include "fs/rock/RockIoRequests.h"
#include "fs/rock/RockIoState.h"
#include "fs/rock/RockSwapDir.h"
#include "globals.h"
#include "MemObject.h"
#include "Parsing.h"
#include "Transients.h"

Rock::IoState::IoState(Rock::SwapDir::Pointer &aDir,
                       StoreEntry *anEntry,
                       StoreIOState::STFNCB *cbFile,
                       StoreIOState::STIOCB *cbIo,
                       void *data):
    readableAnchor_(NULL),
    writeableAnchor_(NULL),
    sidCurrent(-1),
    dir(aDir),
    slotSize(dir->slotSize),
    objOffset(0),
    theBuf(dir->slotSize)
{
    e = anEntry;
    e->lock("rock I/O");
    // anchor, swap_filen, and swap_dirn are set by the caller
    file_callback = cbFile;
    callback = cbIo;
    callback_data = cbdataReference(data);
    ++store_open_disk_fd; // TODO: use a dedicated counter?
    // theFile is set by SwapDir because it depends on DiskIOStrategy
}

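/// stops counting this open disk FD, releases the callback data reference,
/// drops theFile, and unlocks the StoreEntry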
Rock::IoState::~IoState()
{
    --store_open_disk_fd;

    // The dir map entry may still be open for reading at this point because
    // the map entry lock is associated with StoreEntry, not IoState.
    // assert(!readableAnchor_);
    assert(shutting_down || !writeableAnchor_);

    if (callback_data)
        cbdataReferenceDone(callback_data);
    theFile = NULL;

    e->unlock("rock I/O");
}

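/// sets theFile; done by SwapDir (rather than our constructor) because
/// theFile depends on the DiskIOStrategy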
void
Rock::IoState::file(const RefCount<DiskFile> &aFile)
{
    assert(!theFile);
    assert(aFile != NULL);
    theFile = aFile;
}

const Ipc::StoreMapAnchor &
Rock::IoState::readAnchor() const
{
    assert(readableAnchor_);
    return *readableAnchor_;
}

Ipc::StoreMapAnchor &
Rock::IoState::writeAnchor()
{
    assert(writeableAnchor_);
    return *writeableAnchor_;
}

/// convenience wrapper returning the map slot we are reading now
const Ipc::StoreMapSlice &
Rock::IoState::currentReadableSlice() const
{
    return dir->map->readableSlice(swap_filen, sidCurrent);
}

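/// locates the slot containing coreOff (walking the slice chain as needed),
/// then schedules a disk read of that slot's payload; calls the reader back
/// with zero bytes when the requested offset is beyond the stored data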
void
Rock::IoState::read_(char *buf, size_t len, off_t coreOff, STRCB *cb, void *data)
{
    debugs(79, 7, swap_filen << " reads from " << coreOff);

    assert(theFile != NULL);
    assert(coreOff >= 0);

    // if we are dealing with the first read or
    // if the offset went backwards, start searching from the beginning
    if (sidCurrent < 0 || coreOff < objOffset) {
        sidCurrent = readAnchor().start;
        objOffset = 0;
    }

    while (sidCurrent >= 0 && coreOff >= objOffset + currentReadableSlice().size) {
        objOffset += currentReadableSlice().size;
        sidCurrent = currentReadableSlice().next;
    }

    assert(read.callback == NULL);
    assert(read.callback_data == NULL);
    read.callback = cb;
    read.callback_data = cbdataReference(data);

    // punt if read offset is too big (because of client bugs or collapsing)
    if (sidCurrent < 0) {
        debugs(79, 5, "no " << coreOff << " in " << *e);
        callReaderBack(buf, 0);
        return;
    }

    offset_ = coreOff;
    len = min(len,
              static_cast<size_t>(objOffset + currentReadableSlice().size - coreOff));
    const uint64_t diskOffset = dir->diskOffset(sidCurrent);
    theFile->read(new ReadRequest(::ReadRequest(buf,
                                  diskOffset + sizeof(DbCellHeader) + coreOff - objOffset, len), this));
}

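/// delivers the read result (possibly zero bytes) to the STRCB callback,
/// consuming the stored callback and its cbdata reference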
void
Rock::IoState::callReaderBack(const char *buf, int rlen)
{
    debugs(79, 5, rlen << " bytes for " << *e);
    StoreIOState::STRCB *callb = read.callback;
    assert(callb);
    read.callback = NULL;
    void *cbdata;
    if (cbdataReferenceValidDone(read.callback_data, &cbdata))
        callb(cbdata, buf, rlen, this);
}

/// wraps tryWrite() to handle deep write failures centrally and safely
bool
Rock::IoState::write(char const *buf, size_t size, off_t coreOff, FREE *dtor)
{
    bool success = false;
    try {
        tryWrite(buf, size, coreOff);
        success = true;
    } catch (const std::exception &ex) { // TODO: should we catch ... as well?
        debugs(79, 2, "db write error: " << ex.what());
        dir->writeError(*e);
        finishedWriting(DISK_ERROR);
        // 'this' might be gone beyond this point; fall through to free buf
    }

    // careful: 'this' might be gone here

    if (dtor)
        (dtor)(const_cast<char*>(buf)); // cast due to a broken API?

    return success;
}

/**
 * Possibly send data to be written to disk:
 * We only write data when a full slot is accumulated or when close() is called.
 * We buffer, in part, to avoid forcing the OS to _read_ old unwritten portions
 * of the slot when the write does not end at a page or sector boundary.
 */
void
Rock::IoState::tryWrite(char const *buf, size_t size, off_t coreOff)
{
    debugs(79, 7, swap_filen << " writes " << size << " more");

    // either this is the first write or an append; we do not support write gaps
    assert(!coreOff || coreOff == -1);

    // allocate the first slice during the first write
    if (!coreOff) {
        assert(sidCurrent < 0);
        sidCurrent = reserveSlotForWriting(); // throws on failures
        assert(sidCurrent >= 0);
        writeAnchor().start = sidCurrent;
    }

    // buffer incoming data in the slot buffer and write overflowing or final slots
    // quit when no data is left or we stopped writing on a reentrant error
    while (size > 0 && theFile != NULL) {
        assert(sidCurrent >= 0);
        const size_t processed = writeToBuffer(buf, size);
        buf += processed;
        size -= processed;
        const bool overflow = size > 0;

        // We do not write a full buffer without overflow because
        // we would not yet know what to set the nextSlot to.
        if (overflow) {
            const SlotId sidNext = reserveSlotForWriting(); // throws
            assert(sidNext >= 0);
            writeToDisk(sidNext);
        } else if (Store::Root().transientReaders(*e)) {
            // write a partial buffer for all remote hit readers to see
            writeBufToDisk(-1, false);
        }
    }
}

/// Buffers incoming data for the current slot.
/// \return the number of bytes buffered
size_t
Rock::IoState::writeToBuffer(char const *buf, size_t size)
{
    // do not buffer a cell header for nothing
    if (!size)
        return 0;

    if (!theBuf.size) {
        // will fill the header in writeToDisk when the next slot is known
        theBuf.appended(sizeof(DbCellHeader));
    }

    size_t forCurrentSlot = min(size, static_cast<size_t>(theBuf.spaceSize()));
    theBuf.append(buf, forCurrentSlot);
    offset_ += forCurrentSlot; // so that Core thinks we wrote it
    return forCurrentSlot;
}

/// write what was buffered during write() calls
/// negative sidNext means this is the last write request for this entry
void
Rock::IoState::writeToDisk(const SlotId sidNext)
{
    assert(theFile != NULL);
    assert(theBuf.size >= sizeof(DbCellHeader));

    // TODO: if the DiskIO module is mmap-based, we should be writing whole pages
    // to avoid triggering read-page;new_head+old_tail;write-page overheads

    writeBufToDisk(sidNext, sidNext < 0);
    theBuf.clear();

    sidCurrent = sidNext;
}

/// creates and submits a request to write the current slot buffer to disk
/// eof is true if and only if this is the last slot
void
Rock::IoState::writeBufToDisk(const SlotId sidNext, bool eof)
{
    // no slots after the last/eof slot (but partial slots may have a nil next)
    assert(!eof || sidNext < 0);

    // finalize the db cell header
    DbCellHeader header;
    memcpy(header.key, e->key, sizeof(header.key));
    header.firstSlot = writeAnchor().start;
    header.nextSlot = sidNext;
    header.payloadSize = theBuf.size - sizeof(DbCellHeader);
    header.entrySize = eof ? offset_ : 0; // storeSwapOutFileClosed sets swap_file_sz after write
    header.version = writeAnchor().basics.timestamp;

    // copy the finalized db cell header into the buffer
    memcpy(theBuf.mem, &header, sizeof(DbCellHeader));

    // and now allocate another buffer for the WriteRequest so that
    // we can support concurrent WriteRequests (and to ease cleaning)
    // TODO: should we limit the number of outstanding requests?
    size_t wBufCap = 0;
    void *wBuf = memAllocBuf(theBuf.size, &wBufCap);
    memcpy(wBuf, theBuf.mem, theBuf.size);

    const uint64_t diskOffset = dir->diskOffset(sidCurrent);
    debugs(79, 5, HERE << swap_filen << " at " << diskOffset << '+' <<
           theBuf.size);

    WriteRequest *const r = new WriteRequest(
        ::WriteRequest(static_cast<char*>(wBuf), diskOffset, theBuf.size,
                       memFreeBufFunc(wBufCap)), this);
    r->sidCurrent = sidCurrent;
    r->sidNext = sidNext;
    r->eof = eof;

    // theFile->write may call writeCompleted immediately
    theFile->write(r);
}

/// finds and returns a free db slot to fill or throws
Rock::SlotId
Rock::IoState::reserveSlotForWriting()
{
    Ipc::Mem::PageId pageId;
    if (dir->useFreeSlot(pageId))
        return pageId.number-1;

    // This may happen when the number of available db slots is close to the
    // number of concurrent requests reading or writing those slots, which may
    // happen when the db is "small" compared to the request traffic OR when we
    // are rebuilding and have not loaded "many" entries or empty slots yet.
    throw TexcHere("ran out of free db slots");
}

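/// notifies remote readers (via CollapsedForwarding) that writing has ended
/// and schedules the close (STIOCB) callback with the given DISK_* result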
void
Rock::IoState::finishedWriting(const int errFlag)
{
    // we incremented offset_ while accumulating data in write()
    // we do not reset writeableAnchor_ here because we still keep the lock
    CollapsedForwarding::Broadcast(*e);
    callBack(errFlag);
}

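/// finishes this I/O: flushes the last slot (wroteAll), aborts a partially
/// stored entry (writerGone), or just reports completion (readerDone)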
void
Rock::IoState::close(int how)
{
    debugs(79, 3, swap_filen << " offset: " << offset_ << " how: " << how <<
           " buf: " << theBuf.size << " callback: " << callback);

    if (!theFile) {
        debugs(79, 3, "I/O already canceled");
        assert(!callback);
        // We keep writeableAnchor_ after callBack() on I/O errors.
        assert(!readableAnchor_);
        return;
    }

    switch (how) {
    case wroteAll:
        assert(theBuf.size > 0); // we never flush last bytes on our own
        writeToDisk(-1); // flush the last, still unwritten slot to disk
        return; // writeCompleted() will callBack()

    case writerGone:
        assert(writeableAnchor_);
        dir->writeError(*e); // abort a partially stored entry
        finishedWriting(DISK_ERROR);
        return;

    case readerDone:
        callBack(0);
        return;
    }
}

/// close callback (STIOCB) dialer: breaks dependencies and
/// counts IOState concurrency level
class StoreIOStateCb: public CallDialer
{
public:
    StoreIOStateCb(StoreIOState::STIOCB *cb, void *data, int err, const Rock::IoState::Pointer &anSio):
        callback(NULL),
        callback_data(NULL),
        errflag(err),
        sio(anSio) {

        callback = cb;
        callback_data = cbdataReference(data);
    }

    StoreIOStateCb(const StoreIOStateCb &cb):
        callback(NULL),
        callback_data(NULL),
        errflag(cb.errflag),
        sio(cb.sio) {

        callback = cb.callback;
        callback_data = cbdataReference(cb.callback_data);
    }

    virtual ~StoreIOStateCb() {
        cbdataReferenceDone(callback_data); // may be nil already
    }

    void dial(AsyncCall &) {
        void *cbd;
        if (cbdataReferenceValidDone(callback_data, &cbd) && callback)
            callback(cbd, errflag, sio.getRaw());
    }

    bool canDial(AsyncCall &) const {
        return cbdataReferenceValid(callback_data) && callback;
    }

    virtual void print(std::ostream &os) const {
        os << '(' << callback_data << ", err=" << errflag << ')';
    }

private:
    StoreIOStateCb &operator =(const StoreIOStateCb &); // not defined

    StoreIOState::STIOCB *callback;
    void *callback_data;
    int errflag;
    Rock::IoState::Pointer sio;
};

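/// forgets theFile and schedules the close (STIOCB) callback via StoreIOStateCb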
void
Rock::IoState::callBack(int errflag)
{
    debugs(79,3, HERE << "errflag=" << errflag);
    theFile = NULL;

    AsyncCall::Pointer call = asyncCall(79,3, "SomeIoStateCloseCb",
                                        StoreIOStateCb(callback, callback_data, errflag, this));
    ScheduleCallHere(call);

    callback = NULL;
    cbdataReferenceDone(callback_data);
}