From 18102f7de14e3ba35d2b4e9457b3611a178bf8b9 Mon Sep 17 00:00:00 2001
From: Eduard Bagdasaryan
Date: Wed, 7 Aug 2019 12:04:30 +0000
Subject: [PATCH] Fix rock disk entry contamination related to aborted
 swapouts (#444)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Also probably fixed hit response corruption related to aborted rock
swapins.

The following disk entry contamination sequence was observed in
detailed cache logs during high-load Polygraph tests. Some of the
observed level-1 errors matched those in real-world deployments.

1. Worker A schedules AX – a request to write a piece of entry A to
   disk slot X. The disker is busy writing and reading other slots for
   worker A and other workers, so AX stays in the A-to-Disker queue for
   a while.

2. Entry A aborts swapout (for any reason, including network errors
   while receiving the being-stored response). Squid makes disk slot X
   available for other entries to use. AX stays queued.

3. Another worker B picks up disk slot X (from the shared free disk
   slot pool) and schedules BX, a request to write a piece of entry B
   to disk slot X. BX gets queued in a B-to-Disker queue. AX stays
   queued.

4. The disker satisfies write request BX. Disk slot X contains entry B
   contents now. AX stays queued.

5. The disker satisfies write request AX. Disk slot X is a part of the
   entry B slot chain but contains former entry A contents now!

HTTP requests for entry B now read entry A leftovers from disk and
complain about metadata mismatches (at best) or get wrong response
contents (at worst).

To prevent premature disk slot reuse, we now keep disk slots reserved
while they are in the disk queue, even if the corresponding cache entry
is long gone: Individual disk write requests now "own" the slot they
are writing. The Rock::IoState object owns reserved but not yet used
slots so that they can be freed when the object is gone. The disk entry
owns the (successfully written) slots added to its chain in the map.

The new slot ownership scheme required changes in what metadata the
writing code has to maintain. For example, we now keep the address of
the previous slot in the entry chain so that we can update its .next
field after a successful disk write. Also, the old code detecting
dropped write requests could no longer rely on the now-empty .next
field in the previous map entry. The rewritten code numbers I/O
transactions so that out-of-order replies can be detected without using
the map (see the illustrative sketch at the end of this message).

I tried to simplify the metadata maintenance code by shortening
reservation lifetimes and using just-in-time [first] slot reservations.
The new code may also leak fewer slots when facing C++ exceptions.

As for reading, I realized that we had no protection from dropped rock
read requests. If the first read request is dropped, the metadata
decoding would probably fail, but if subsequent reads are skipped, the
client may be fed data that is missing those skipped blocks. I did not
try to reproduce these problems, but they are now fixed using the same
I/O transaction numbering mechanism that the writing code now uses.
Negative length checks in store_client.cc treat dropped reads as
errors.

I also removed commented-out "partial writing" code because IoState
class member changes should expose any dangerous merge problems.
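
In essence, each IoState counts the requests it sends and the replies
it receives; a reply is valid only if its number matches the number of
replies seen so far. Below is a minimal standalone sketch of that idea,
not part of the patch: MiniIoState is a hypothetical stand-in for the
bookkeeping that Rock::IoState::expectedReply() performs.

    // Sketch of the I/O transaction numbering used to detect dropped or
    // reordered disker replies. Hypothetical class; illustration only.
    #include <cassert>
    #include <cstdint>

    typedef uint64_t IoXactionId; // as in src/fs/rock/forward.h

    class MiniIoState
    {
    public:
        /// tags a request about to be queued for the disker
        IoXactionId nextRequestId() { return ++requestsSent; }

        /// whether all earlier requests have been answered already;
        /// false means some request was dropped (or replies were
        /// reordered), so the caller must treat this I/O as failed
        bool expectedReply(const IoXactionId receivedId) {
            assert(receivedId && receivedId <= requestsSent);
            ++repliesReceived;
            return receivedId == repliesReceived;
        }

    private:
        uint64_t requestsSent = 0;
        uint64_t repliesReceived = 0;
    };

    int main()
    {
        MiniIoState sio;
        const auto id1 = sio.nextRequestId();
        const bool firstOk = sio.expectedReply(id1); // in-order reply
        assert(firstOk);

        sio.nextRequestId(); // pretend the disker drops this request
        const auto id3 = sio.nextRequestId();
        const bool thirdOk = sio.expectedReply(id3); // gap detected
        assert(!thirdOk);
        return 0;
    }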
---
 src/fs/rock/RockIoRequests.cc |  12 +--
 src/fs/rock/RockIoRequests.h  |  17 +++-
 src/fs/rock/RockIoState.cc    | 155 ++++++++++++++++++++++------------
 src/fs/rock/RockIoState.h     |  36 ++++++--
 src/fs/rock/RockSwapDir.cc    |  47 +++++------
 src/fs/rock/RockSwapDir.h     |   1 -
 src/fs/rock/forward.h         |   5 ++
 src/store_client.cc           |  13 +--
 8 files changed, 181 insertions(+), 105 deletions(-)

diff --git a/src/fs/rock/RockIoRequests.cc b/src/fs/rock/RockIoRequests.cc
index b13acf61a7..102cb63185 100644
--- a/src/fs/rock/RockIoRequests.cc
+++ b/src/fs/rock/RockIoRequests.cc
@@ -14,19 +14,19 @@
 CBDATA_NAMESPACED_CLASS_INIT(Rock, ReadRequest);
 CBDATA_NAMESPACED_CLASS_INIT(Rock, WriteRequest);
 
-Rock::ReadRequest::ReadRequest(const ::ReadRequest &base,
-                               const IoState::Pointer &anSio):
+Rock::ReadRequest::ReadRequest(const ::ReadRequest &base, const IoState::Pointer &anSio, const IoXactionId anId):
     ::ReadRequest(base),
-    sio(anSio)
+    sio(anSio),
+    id(anId)
 {
 }
 
-Rock::WriteRequest::WriteRequest(const ::WriteRequest &base,
-                                 const IoState::Pointer &anSio):
+Rock::WriteRequest::WriteRequest(const ::WriteRequest &base, const IoState::Pointer &anSio, const IoXactionId anId):
     ::WriteRequest(base),
     sio(anSio),
+    sidPrevious(-1),
     sidCurrent(-1),
-    sidNext(-1),
+    id(anId),
     eof(false)
 {
 }
diff --git a/src/fs/rock/RockIoRequests.h b/src/fs/rock/RockIoRequests.h
index 9187754edc..aca354acff 100644
--- a/src/fs/rock/RockIoRequests.h
+++ b/src/fs/rock/RockIoRequests.h
@@ -11,6 +11,7 @@
 
 #include "DiskIO/ReadRequest.h"
 #include "DiskIO/WriteRequest.h"
+#include "fs/rock/forward.h"
 #include "fs/rock/RockIoState.h"
 
 class DiskFile;
@@ -23,8 +24,11 @@ class ReadRequest: public ::ReadRequest
     CBDATA_CLASS(ReadRequest);
 
 public:
-    ReadRequest(const ::ReadRequest &base, const IoState::Pointer &anSio);
+    ReadRequest(const ::ReadRequest &, const IoState::Pointer &, const IoXactionId);
     IoState::Pointer sio;
+
+    /// identifies this read transaction for the requesting IoState
+    IoXactionId id;
 };
 
 class WriteRequest: public ::WriteRequest
@@ -32,14 +36,19 @@ class WriteRequest: public ::WriteRequest
     CBDATA_CLASS(WriteRequest);
 
 public:
-    WriteRequest(const ::WriteRequest &base, const IoState::Pointer &anSio);
+    WriteRequest(const ::WriteRequest &, const IoState::Pointer &, const IoXactionId);
     IoState::Pointer sio;
 
+    /* We own these two reserved slots until SwapDir links them into the map. */
+
+    /// slot that will point to sidCurrent in the cache_dir map
+    SlotId sidPrevious;
+
+    /// slot being written using this write request
     SlotId sidCurrent;
 
-    /// allocated next slot (negative if we are writing the last slot)
-    SlotId sidNext;
+    /// identifies this write transaction for the requesting IoState
+    IoXactionId id;
 
     /// whether this is the last request for the entry
     bool eof;
diff --git a/src/fs/rock/RockIoState.cc b/src/fs/rock/RockIoState.cc
index ec80e2403c..ab59790fdc 100644
--- a/src/fs/rock/RockIoState.cc
+++ b/src/fs/rock/RockIoState.cc
@@ -35,7 +35,12 @@ Rock::IoState::IoState(Rock::SwapDir::Pointer &aDir,
     dir(aDir),
     slotSize(dir->slotSize),
     objOffset(0),
+    sidFirst(-1),
+    sidPrevious(-1),
     sidCurrent(-1),
+    sidNext(-1),
+    requestsSent(0),
+    repliesReceived(0),
     theBuf(dir->slotSize)
 {
     e = anEntry;
@@ -101,7 +106,8 @@ Rock::IoState::read_(char *buf, size_t len, off_t coreOff, STRCB *cb, void *data
     // if we are dealing with the first read or
     // if the offset went backwords, start searching from the beginning
     if (sidCurrent < 0 || coreOff < objOffset) {
-        sidCurrent = readAnchor().start;
+        // readers do not need sidFirst but set it for consistency/triage sake
+        sidCurrent = sidFirst = readAnchor().start;
         objOffset = 0;
     }
 
@@ -126,14 +132,32 @@ Rock::IoState::read_(char *buf, size_t len, off_t coreOff, STRCB *cb, void *data
     len = min(len,
               static_cast<size_t>(objOffset + currentReadableSlice().size - coreOff));
     const uint64_t diskOffset = dir->diskOffset(sidCurrent);
-    theFile->read(new ReadRequest(::ReadRequest(buf,
-        diskOffset + sizeof(DbCellHeader) + coreOff - objOffset, len), this));
+    const auto start = diskOffset + sizeof(DbCellHeader) + coreOff - objOffset;
+    const auto id = ++requestsSent;
+    const auto request = new ReadRequest(::ReadRequest(buf, start, len), this, id);
+    theFile->read(request);
 }
 
+void
+Rock::IoState::handleReadCompletion(Rock::ReadRequest &request, const int rlen, const int errFlag)
+{
+    if (errFlag != DISK_OK || rlen < 0) {
+        debugs(79, 3, errFlag << " failure for " << *e);
+        return callReaderBack(request.buf, -1);
+    }
+
+    if (!expectedReply(request.id))
+        return callReaderBack(request.buf, -1);
+
+    debugs(79, 5, '#' << request.id << " read " << rlen << " bytes at " << offset_ << " for " << *e);
+    offset_ += rlen;
+    callReaderBack(request.buf, rlen);
+}
+
+/// report (already sanitized/checked) I/O results to the read initiator
 void
 Rock::IoState::callReaderBack(const char *buf, int rlen)
 {
-    debugs(79, 5, rlen << " bytes for " << *e);
     splicingPoint = rlen >= 0 ? sidCurrent : -1;
     if (splicingPoint < 0)
         staleSplicingPointNext = -1;
@@ -181,44 +205,29 @@ Rock::IoState::tryWrite(char const *buf, size_t size, off_t coreOff)
 {
     debugs(79, 7, swap_filen << " writes " << size << " more");
 
-    // either this is the first write or append; we do not support write gaps
+    // either this is the first write or append;
+    // we do not support write gaps or rewrites
     assert(!coreOff || coreOff == -1);
 
     // throw if an accepted unknown-size entry grew too big or max-size changed
     Must(static_cast<int64_t>(offset_ + size) <= static_cast<int64_t>(dir->maxObjectSize()));
 
-    // allocate the first slice during the first write
-    if (!coreOff) {
-        assert(sidCurrent < 0);
-        sidCurrent = dir->reserveSlotForWriting(); // throws on failures
-        assert(sidCurrent >= 0);
-        writeAnchor().start = sidCurrent;
-    }
-
     // buffer incoming data in slot buffer and write overflowing or final slots
     // quit when no data left or we stopped writing on reentrant error
     while (size > 0 && theFile != NULL) {
-        assert(sidCurrent >= 0);
         const size_t processed = writeToBuffer(buf, size);
         buf += processed;
         size -= processed;
         const bool overflow = size > 0;
 
         // We do not write a full buffer without overflow because
-        // we would not yet know what to set the nextSlot to.
+        // we do not want to risk writing a payload-free slot on EOF.
         if (overflow) {
-            const auto sidNext = dir->reserveSlotForWriting(); // throws
+            Must(sidNext < 0);
+            sidNext = dir->reserveSlotForWriting();
             assert(sidNext >= 0);
-            writeToDisk(sidNext);
-
-            // } else if (Store::Root().transientReaders(*e)) {
-            //     XXX: Partial writes cannot be read -- no map->startAppending()!
-            //     XXX: Partial writes confuse SwapDir::droppedEarlierRequest(),
-            //     resulting in released entries and, hence, misses and CF retries.
-            //     XXX: The effective benefit of partial writes is reduced by
-            //     doPages() buffering SM_PAGE_SIZE*n leftovers.
-
-            //     // write partial buffer for all remote hit readers to see
-            //     writeBufToDisk(-1, false, false);
+            writeToDisk();
+            Must(sidNext < 0); // short sidNext lifetime simplifies code logic
         }
     }
@@ -234,7 +243,7 @@ Rock::IoState::writeToBuffer(char const *buf, size_t size)
         return 0;
 
     if (!theBuf.size) {
-        // will fill the header in writeToDisk when the next slot is known
+        // eventually, writeToDisk() will fill this header space
        theBuf.appended(sizeof(DbCellHeader));
     }
 
@@ -245,44 +254,38 @@ Rock::IoState::writeToBuffer(char const *buf, size_t size)
 }
 
 /// write what was buffered during write() calls
-/// negative sidNext means this is the last write request for this entry
 void
-Rock::IoState::writeToDisk(const SlotId sidNextProposal)
+Rock::IoState::writeToDisk()
 {
     assert(theFile != NULL);
     assert(theBuf.size >= sizeof(DbCellHeader));
 
-    const bool lastWrite = sidNextProposal < 0;
+    assert((sidFirst < 0) == (sidCurrent < 0));
+    if (sidFirst < 0) // this is the first disk write
+        sidCurrent = sidFirst = dir->reserveSlotForWriting();
+
+    // negative sidNext means this is the last write request for this entry
+    const bool lastWrite = sidNext < 0;
+    // here, eof means that we are writing the right-most entry slot
     const bool eof = lastWrite &&
                      // either not updating or the updating reader has loaded everything
                      (touchingStoreEntry() || staleSplicingPointNext < 0);
-    // approve sidNextProposal unless _updating_ the last slot
-    const SlotId sidNext = (!touchingStoreEntry() && lastWrite) ?
-                           staleSplicingPointNext : sidNextProposal;
-    debugs(79, 5, "sidNext:" << sidNextProposal << "=>" << sidNext << " eof=" << eof);
+    debugs(79, 5, "sidCurrent=" << sidCurrent << " sidNext=" << sidNext << " eof=" << eof);
 
     // TODO: if DiskIO module is mmap-based, we should be writing whole pages
     // to avoid triggering read-page;new_head+old_tail;write-page overheads
 
-    writeBufToDisk(sidNext, eof, lastWrite);
-    theBuf.clear();
-
-    sidCurrent = sidNext;
-}
-
-/// creates and submits a request to write current slot buffer to disk
-/// eof is true if and only this is the last slot
-void
-Rock::IoState::writeBufToDisk(const SlotId sidNext, const bool eof, const bool lastWrite)
-{
-    // no slots after the last/eof slot (but partial slots may have a nil next)
-    assert(!eof || sidNext < 0);
+    assert(!eof || sidNext < 0); // no slots after eof
 
     // finalize db cell header
     DbCellHeader header;
     memcpy(header.key, e->key, sizeof(header.key));
-    header.firstSlot = writeAnchor().start;
-    header.nextSlot = sidNext;
+    header.firstSlot = sidFirst;
+
+    const auto lastUpdatingWrite = lastWrite && !touchingStoreEntry();
+    assert(!lastUpdatingWrite || sidNext < 0);
+    header.nextSlot = lastUpdatingWrite ? staleSplicingPointNext : sidNext;
+
     header.payloadSize = theBuf.size - sizeof(DbCellHeader);
     header.entrySize = eof ? offset_ : 0; // storeSwapOutFileClosed sets swap_file_sz after write
     header.version = writeAnchor().basics.timestamp;
@@ -300,21 +303,52 @@ Rock::IoState::writeBufToDisk(const SlotId sidNext, const bool eof, const bool l
     const uint64_t diskOffset = dir->diskOffset(sidCurrent);
     debugs(79, 5, HERE << swap_filen << " at " << diskOffset << '+' <<
            theBuf.size);
-
+    const auto id = ++requestsSent;
     WriteRequest *const r = new WriteRequest(
         ::WriteRequest(static_cast<char*>(wBuf), diskOffset, theBuf.size,
-                       memFreeBufFunc(wBufCap)), this);
+                       memFreeBufFunc(wBufCap)), this, id);
     r->sidCurrent = sidCurrent;
-    r->sidNext = sidNext;
+    r->sidPrevious = sidPrevious;
     r->eof = lastWrite;
 
+    sidPrevious = sidCurrent;
+    sidCurrent = sidNext; // sidNext may be cleared/negative already
+    sidNext = -1;
+
+    theBuf.clear();
+
+    // theFile->write may call writeCompleted immediately
     theFile->write(r);
 }
 
+bool
+Rock::IoState::expectedReply(const IoXactionId receivedId)
+{
+    Must(requestsSent); // paranoid: we sent some requests
+    Must(receivedId); // paranoid: the request was sent by some sio
+    Must(receivedId <= requestsSent); // paranoid: within our range
+    ++repliesReceived;
+    const auto expectedId = repliesReceived;
+    if (receivedId == expectedId)
+        return true;
+
+    debugs(79, 3, "no; expected reply #" << expectedId <<
+           ", but got #" << receivedId);
+    return false;
+}
+
 void
 Rock::IoState::finishedWriting(const int errFlag)
 {
+    if (sidCurrent >= 0) {
+        dir->noteFreeMapSlice(sidCurrent);
+        sidCurrent = -1;
+    }
+    if (sidNext >= 0) {
+        dir->noteFreeMapSlice(sidNext);
+        sidNext = -1;
+    }
+
     // we incremented offset_ while accumulating data in write()
     // we do not reset writeableAnchor_ here because we still keep the lock
     if (touchingStoreEntry())
@@ -326,7 +360,9 @@ void
 Rock::IoState::close(int how)
 {
     debugs(79, 3, swap_filen << " offset: " << offset_ << " how: " << how <<
-           " buf: " << theBuf.size << " callback: " << callback);
+           " leftovers: " << theBuf.size <<
+           " after " << requestsSent << '/' << repliesReceived <<
+           " callback: " << callback);
 
     if (!theFile) {
         debugs(79, 3, "I/O already canceled");
@@ -339,8 +375,17 @@ Rock::IoState::close(int how)
     switch (how) {
     case wroteAll:
         assert(theBuf.size > 0); // we never flush last bytes on our own
-        writeToDisk(-1); // flush last, yet unwritten slot to disk
-        return; // writeCompleted() will callBack()
+        try {
+            writeToDisk(); // flush last, yet unwritten slot to disk
+            return; // writeCompleted() will callBack()
+        }
+        catch (...) {
+            debugs(79, 2, "db flush error: " << CurrentException);
+            // TODO: Move finishedWriting() into SwapDir::writeError().
+            dir->writeError(*this);
+            finishedWriting(DISK_ERROR);
+        }
+        return;
 
     case writerGone:
         dir->writeError(*this); // abort a partially stored entry
diff --git a/src/fs/rock/RockIoState.h b/src/fs/rock/RockIoState.h
index 5b6ec0e604..5c3557c174 100644
--- a/src/fs/rock/RockIoState.h
+++ b/src/fs/rock/RockIoState.h
@@ -9,6 +9,7 @@
 #ifndef SQUID_FS_ROCK_IO_STATE_H
 #define SQUID_FS_ROCK_IO_STATE_H
 
+#include "fs/rock/forward.h"
 #include "fs/rock/RockSwapDir.h"
 #include "sbuf/MemBlob.h"
 
@@ -41,12 +42,16 @@ public:
     /// whether we are still waiting for the I/O results (i.e., not closed)
     bool stillWaiting() const { return theFile != NULL; }
 
-    /// forwards read data to the reader that initiated this I/O
-    void callReaderBack(const char *buf, int rlen);
+    /// forwards read data (or an error) to the reader that initiated this I/O
+    void handleReadCompletion(Rock::ReadRequest &request, const int rlen, const int errFlag);
 
     /// called by SwapDir::writeCompleted() after the last write and on error
     void finishedWriting(const int errFlag);
 
+    /// notes that the disker has satisfied the given I/O request
+    /// \returns whether all earlier I/O requests have been satisfied already
+    bool expectedReply(const IoXactionId receivedId);
+
     /* one and only one of these will be set and locked; access via *Anchor() */
     const Ipc::StoreMapAnchor *readableAnchor_; ///< starting point for reading
     Ipc::StoreMapAnchor *writeableAnchor_; ///< starting point for writing
@@ -64,15 +69,36 @@ private:
     void tryWrite(char const *buf, size_t size, off_t offset);
     size_t writeToBuffer(char const *buf, size_t size);
-    void writeToDisk(const SlotId nextSlot);
-    void writeBufToDisk(const SlotId nextSlot, const bool eof, const bool lastWrite);
+    void writeToDisk();
 
+    void callReaderBack(const char *buf, int rlen);
     void callBack(int errflag);
 
     Rock::SwapDir::Pointer dir; ///< swap dir that initiated I/O
     const size_t slotSize; ///< db cell size
     int64_t objOffset; ///< object offset for current db slot
-    SlotId sidCurrent; ///< ID of the db slot currently being read or written
+
+    /// The very first entry slot. Usually the same as anchor.first,
+    /// but writers set anchor.first only after the first write is done.
+    SlotId sidFirst;
+
+    /// Unused by readers.
+    /// For writers, the slot pointing (via .next) to sidCurrent.
+    SlotId sidPrevious;
+
+    /// For readers, the db slot currently being read from disk.
+    /// For writers, the reserved db slot currently being filled (to be written).
+    SlotId sidCurrent;
+
+    /// Unused by readers.
+    /// For writers, the reserved db slot that sidCurrent.next will point to.
+    SlotId sidNext;
+
+    /// the number of read or write requests we sent to theFile
+    uint64_t requestsSent;
+
+    /// the number of successful responses we received from theFile
+    uint64_t repliesReceived;
 
     RefCount<DiskFile> theFile; // "file" responsible for this I/O
     MemBlob theBuf; // use for write content accumulation only
diff --git a/src/fs/rock/RockSwapDir.cc b/src/fs/rock/RockSwapDir.cc
index a5345fd83e..6e77b63f89 100644
--- a/src/fs/rock/RockSwapDir.cc
+++ b/src/fs/rock/RockSwapDir.cc
@@ -845,16 +845,15 @@ Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::Re
     ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
     assert(request);
     IoState::Pointer sio = request->sio;
-
-    if (errflag == DISK_OK && rlen > 0)
-        sio->offset_ += rlen;
-
-    sio->callReaderBack(r->buf, rlen);
+    sio->handleReadCompletion(*request, rlen, errflag);
 }
 
 void
 Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
 {
+    // TODO: Move details into IoState::handleWriteCompletion() after figuring
+    // out how to deal with map access. See readCompleted().
+
     Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
     assert(request);
     assert(request->sio != NULL);
@@ -863,7 +862,7 @@ Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
     // quit if somebody called IoState::close() while we were waiting
     if (!sio.stillWaiting()) {
         debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
-        noteFreeMapSlice(request->sidNext);
+        noteFreeMapSlice(request->sidCurrent);
         return;
     }
 
@@ -871,7 +870,7 @@ Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
     if (errflag != DISK_OK)
         handleWriteCompletionProblem(errflag, *request);
-    else if (droppedEarlierRequest(*request))
+    else if (!sio.expectedReply(request->id))
         handleWriteCompletionProblem(DISK_ERROR, *request);
     else
         handleWriteCompletionSuccess(*request);
@@ -888,15 +887,24 @@ Rock::SwapDir::handleWriteCompletionSuccess(const WriteRequest &request)
     sio.splicingPoint = request.sidCurrent;
     // do not increment sio.offset_ because we do it in sio->write()
 
-    // finalize the shared slice info after writing slice contents to disk
+    assert(sio.writeableAnchor_);
+    if (sio.writeableAnchor_->start < 0) { // wrote the first slot
+        Must(request.sidPrevious < 0);
+        sio.writeableAnchor_->start = request.sidCurrent;
+    } else {
+        Must(request.sidPrevious >= 0);
+        map->writeableSlice(sio.swap_filen, request.sidPrevious).next = request.sidCurrent;
+    }
+
+    // finalize the shared slice info after writing slice contents to disk;
+    // the chain gets possession of the slice we were writing
     Ipc::StoreMap::Slice &slice =
         map->writeableSlice(sio.swap_filen, request.sidCurrent);
     slice.size = request.len - sizeof(DbCellHeader);
-    slice.next = request.sidNext;
+    Must(slice.next < 0);
 
     if (request.eof) {
         assert(sio.e);
-        assert(sio.writeableAnchor_);
         if (sio.touchingStoreEntry()) {
             sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
                                       sio.offset_;
@@ -915,7 +923,7 @@ Rock::SwapDir::handleWriteCompletionProblem(const int errflag, const WriteReques
 {
     auto &sio = *request.sio;
 
-    noteFreeMapSlice(request.sidNext);
+    noteFreeMapSlice(request.sidCurrent);
 
     writeError(sio);
     sio.finishedWriting(errflag);
@@ -936,23 +944,6 @@ Rock::SwapDir::writeError(StoreIOState &sio)
     // All callers must also call IoState callback, to propagate the error.
 }
 
-/// whether the disk has dropped at least one of the previous write requests
-bool
-Rock::SwapDir::droppedEarlierRequest(const WriteRequest &request) const
-{
-    const auto &sio = *request.sio;
-    assert(sio.writeableAnchor_);
-    const Ipc::StoreMapSliceId expectedSliceId = sio.splicingPoint < 0 ?
-            sio.writeableAnchor_->start :
-            map->writeableSlice(sio.swap_filen, sio.splicingPoint).next;
-    if (expectedSliceId != request.sidCurrent) {
-        debugs(79, 3, "yes; expected " << expectedSliceId << ", but got " << request.sidCurrent);
-        return true;
-    }
-
-    return false;
-}
-
 void
 Rock::SwapDir::updateHeaders(StoreEntry *updatedE)
 {
diff --git a/src/fs/rock/RockSwapDir.h b/src/fs/rock/RockSwapDir.h
index 3c0e4fe836..4520708816 100644
--- a/src/fs/rock/RockSwapDir.h
+++ b/src/fs/rock/RockSwapDir.h
@@ -140,7 +140,6 @@ private:
     void createError(const char *const msg);
     void handleWriteCompletionSuccess(const WriteRequest &request);
     void handleWriteCompletionProblem(const int errflag, const WriteRequest &request);
-    bool droppedEarlierRequest(const WriteRequest &request) const;
 
     DiskIOStrategy *io;
     RefCount<DiskFile> theFile; ///< cache storage for this cache_dir
diff --git a/src/fs/rock/forward.h b/src/fs/rock/forward.h
index a5ca2234d9..e047f01ac1 100644
--- a/src/fs/rock/forward.h
+++ b/src/fs/rock/forward.h
@@ -32,6 +32,9 @@ class SwapDir;
 /// db cell number, starting with cell 0 (always occupied by the db header)
 typedef sfileno SlotId;
 
+/// unique (within a given IoState object scope) I/O transaction identifier
+typedef uint64_t IoXactionId;
+
 class Rebuild;
 
 class IoState;
@@ -40,6 +43,8 @@ class HeaderUpdater;
 
 class DbCellHeader;
 
+class ReadRequest;
+
 class WriteRequest;
 
 }
diff --git a/src/store_client.cc b/src/store_client.cc
index ae0e8a0509..8c9923b917 100644
--- a/src/store_client.cc
+++ b/src/store_client.cc
@@ -506,6 +506,9 @@ store_client::readBody(const char *, ssize_t len)
     assert(_callback.pending());
     debugs(90, 3, "storeClientReadBody: len " << len << "");
 
+    if (len < 0)
+        return fail();
+
     if (copyInto.offset == 0 && len > 0 && entry->getReply()->sline.status() == Http::scNone) {
         /* Our structure ! */
         HttpReply *rep = (HttpReply *) entry->getReply(); // bypass const
@@ -567,13 +570,8 @@ storeClientReadBody(void *data, const char *buf, ssize_t len, StoreIOState::Poin
 bool
 store_client::unpackHeader(char const *buf, ssize_t len)
 {
-    int xerrno = errno; // FIXME: where does errno come from?
     debugs(90, 3, "store_client::unpackHeader: len " << len << "");
-
-    if (len < 0) {
-        debugs(90, 3, "WARNING: unpack error: " << xstrerr(xerrno));
-        return false;
-    }
+    assert(len >= 0);
 
     int swap_hdr_sz = 0;
     tlv *tlv_list = nullptr;
@@ -623,6 +621,9 @@ store_client::readHeader(char const *buf, ssize_t len)
     if (!object_ok)
         return;
 
+    if (len < 0)
+        return fail();
+
     if (!unpackHeader(buf, len)) {
         fail();
         return;
-- 
2.39.5