Must(readBuf.length() < SQUID_TCP_SO_RCVBUF);
// now we can ensure that there is space to read new data,
// even if readBuf.spaceSize() currently returns zero.
- readBuf.rawSpace(1);
+ readBuf.rawAppendStart(1);
CommIoCbParams rd(this); // will be expanded with ReadNow results
rd.conn = io.conn;
{
SBuf buf;
const auto readLimit = maxBytes + 1; // to detect excessively large files that we do not handle
+ char *rawBuf = buf.rawAppendStart(readLimit);
#if _SQUID_WINDOWS_
DWORD result = 0;
- if (!ReadFile(fd_, buf.rawSpace(readLimit), readLimit, &result, nullptr)) {
+ if (!ReadFile(fd_, rawBuf, readLimit, &result, nullptr)) {
const auto savedError = GetLastError();
throw TexcHere(sysCallFailure("ReadFile", WindowsErrorMessage(savedError).c_str()));
}
#else
- const auto result = ::read(fd_, buf.rawSpace(readLimit), readLimit);
+ const auto result = ::read(fd_, rawBuf, readLimit);
if (result < 0) {
const auto savedErrno = errno;
throw TexcHere(sysCallError("read", savedErrno));
}
#endif
const auto bytesRead = static_cast<size_t>(result);
assert(bytesRead <= readLimit);
Must(!buf.length());
- buf.forceSize(bytesRead);
+ buf.rawAppendFinish(rawBuf, bytesRead);
if (buf.length() < minBytes) {
const auto failure = buf.length() ? "premature eof" : "empty file";
SBuf::size_type sz = buf.spaceSize();
if (params.size > 0 && params.size < sz)
sz = params.size;
- char *inbuf = buf.rawSpace(sz);
+ char *inbuf = buf.rawAppendStart(sz);
errno = 0;
const int retval = FD_READ_METHOD(params.conn->fd, inbuf, sz);
params.xerrno = errno;
debugs(5, 3, params.conn << ", size " << sz << ", retval " << retval << ", errno " << params.xerrno);
if (retval > 0) { // data read most common case
- buf.append(inbuf, retval);
+ buf.rawAppendFinish(inbuf, retval);
fd_bytes(params.conn->fd, retval, FD_READ);
params.flag = Comm::OK;
params.size = retval;
void
Rock::HeaderUpdater::NoteRead(void *data, const char *buf, ssize_t result, StoreIOState::Pointer)
{
+ IoCbParams io(buf, result);
// TODO: Avoid Rock::StoreIOStateCb for jobs to protect jobs for "free".
CallJobHere1(47, 7,
CbcPointer<HeaderUpdater>(static_cast<HeaderUpdater*>(data)),
Rock::HeaderUpdater,
noteRead,
- result);
+ io);
}
void
-Rock::HeaderUpdater::noteRead(ssize_t result)
+Rock::HeaderUpdater::noteRead(const Rock::HeaderUpdater::IoCbParams result)
{
- debugs(47, 7, result);
- if (!result) { // EOF
+ debugs(47, 7, result.size);
+ if (!result.size) { // EOF
stopReading("eof");
} else {
- Must(result > 0);
- bytesRead += result;
- readerBuffer.forceSize(readerBuffer.length() + result);
+ Must(result.size > 0);
+ bytesRead += result.size;
+ readerBuffer.rawAppendFinish(result.buf, result.size);
exchangeBuffer.append(readerBuffer);
debugs(47, 7, "accumulated " << exchangeBuffer.length());
}
Must(reader);
readerBuffer.clear();
storeRead(reader,
- readerBuffer.rawSpace(store->slotSize),
+ readerBuffer.rawAppendStart(store->slotSize),
store->slotSize,
bytesRead,
&NoteRead,
CBDATA_CHILD(HeaderUpdater);
public:
+ class IoCbParams {
+ public:
+ IoCbParams(const char *aBuf, ssize_t aSize) : buf(aBuf), size(aSize) {}
+ const char *buf;
+ ssize_t size;
+ };
HeaderUpdater(const Rock::SwapDir::Pointer &aStore, const Ipc::StoreMapUpdate &update);
virtual ~HeaderUpdater() override = default;
void startReading();
void stopReading(const char *why);
void readMore(const char *why);
- void noteRead(ssize_t result);
+ void noteRead(const IoCbParams result);
void noteDoneReading(int errflag);
void parseReadBytes();
SlotId staleSplicingPointNext; ///< non-updatable old HTTP body suffix start
};
+inline
+std::ostream &operator <<(std::ostream &os, const HeaderUpdater::IoCbParams &params)
+{
+ os << static_cast<const void *>(params.buf) << "," << params.size;
+ return os;
+}
+
} // namespace Rock
#endif /* SQUID_FS_ROCK_HEADER_UPDATER_H */
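
The new IoCbParams type exists because rawAppendFinish() needs the exact pointer that rawAppendStart() returned, so the asynchronous disk-read callback must now carry the buffer address alongside the byte count instead of a bare ssize_t. A rough sketch of that hand-off, using hypothetical stand-in types rather than Squid's storeRead() machinery:

    #include <cstring>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for the SBuf start/finish append API.
    class GrowBuf {
    public:
        char *appendStart(size_t anticipated) {
            start_ = data_.size();
            data_.resize(start_ + anticipated); // make raw space writable
            return &data_[start_];
        }
        void appendFinish(const char *start, size_t actual) {
            if (start != &data_[start_])
                throw std::runtime_error("stale append pointer");
            data_.resize(start_ + actual); // keep only the bytes actually written
        }
        const std::string &data() const { return data_; }
    private:
        std::string data_;
        size_t start_ = 0;
    };

    // Hypothetical equivalent of Rock::HeaderUpdater::IoCbParams: the callback
    // payload pairs the raw buffer with the number of bytes the reader produced.
    struct IoResult {
        const char *buf;
        long size;
    };

    int main() {
        GrowBuf readerBuffer;
        char *space = readerBuffer.appendStart(64);        // like rawAppendStart(slotSize)
        const char *payload = "swapped-out header bytes";
        std::memcpy(space, payload, std::strlen(payload)); // the "disk read" fills raw space

        // The callback receives both pieces, mirroring noteRead(const IoCbParams):
        const IoResult io{space, static_cast<long>(std::strlen(payload))};
        if (io.size > 0)
            readerBuffer.appendFinish(io.buf, static_cast<size_t>(io.size));

        std::cout << readerBuffer.data() << "\n"; // prints the appended bytes
    }
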
return spaceSize(); // reallocated and probably reserved enough space
}
+char *
+SBuf::rawAppendStart(size_type anticipatedSize)
+{
+ char *space = rawSpace(anticipatedSize);
+ debugs(24, 8, id << " start appending up to " << anticipatedSize << " bytes");
+ return space;
+}
+
+void
+SBuf::rawAppendFinish(const char *start, size_type actualSize)
+{
+ Must(bufEnd() == start);
+ Must(store_->canAppend(off_ + len_, actualSize));
+ debugs(24, 8, id << " finish appending " << actualSize << " bytes");
+
+ size_type newSize = length() + actualSize;
+ if (newSize > min(maxSize,store_->capacity-off_))
+ throw SBufTooBigException(__FILE__,__LINE__);
+ len_ = newSize;
+ store_->size = off_ + newSize;
+}
+
char *
SBuf::rawSpace(size_type minSpace)
{
return buf();
}
-void
-SBuf::forceSize(size_type newSize)
-{
- debugs(24, 8, id << " force " << (newSize > length() ? "grow" : "shrink") << " to length=" << newSize);
-
- Must(store_->LockCount() == 1);
- if (newSize > min(maxSize,store_->capacity-off_))
- throw SBufTooBigException(__FILE__,__LINE__);
- len_ = newSize;
- store_->size = newSize;
-}
-
const char*
SBuf::c_str()
{
*/
const char* rawContent() const;
- /** Exports a writable pointer to the SBuf internal storage.
- * \warning Use with EXTREME caution, this is a dangerous operation.
- *
- * Returns a pointer to the first unused byte in the SBuf's storage,
- * which can be be used for appending. At least minSize bytes will
- * be available for writing.
- * The returned pointer must not be stored by the caller, as it will
- * be invalidated by the first call to a non-const method call
- * on the SBuf.
- * This call guarantees to never return NULL.
- * \see reserveSpace
- * \note Unlike reserveSpace(), this method does not guarantee exclusive
- * buffer ownership. It is instead optimized for a one writer
- * (appender), many readers scenario by avoiding unnecessary
- * copying and allocations.
- * \throw SBufTooBigException if the user tries to allocate too big a SBuf
- */
- char *rawSpace(size_type minSize);
+ /// \returns a buffer suitable for appending at most `anticipatedSize` bytes
+ /// The buffer must be used "immediately" because it is invalidated by most
+ /// non-constant SBuf method calls, including such calls against other SBuf
+ /// objects that just happen to share the same underlying MemBlob storage!
+ char *rawAppendStart(size_type anticipatedSize);
+
+ /// Updates SBuf metadata to reflect appending `actualSize` bytes to the
+ /// buffer returned by the corresponding rawAppendStart() call. Throws if
+ /// rawAppendStart(actualSize) would have returned a different value now.
+ /// \param start raw buffer previously returned by rawAppendStart()
+ /// \param actualSize the number of appended bytes
+ void rawAppendFinish(const char *start, size_type actualSize);
/** Obtain how much free space is available in the backing store.
*
*/
size_type spaceSize() const { return store_->spaceSize(); }
- /** Force a SBuf's size
- * \warning use with EXTREME caution, this is a dangerous operation
- *
- * Adapt the SBuf internal state after external interference
- * such as writing into it via rawSpace.
- * \throw TextException if SBuf doesn't have exclusive ownership of store
- * \throw SBufTooBigException if new size is bigger than available store space
- */
- void forceSize(size_type newSize);
-
/** exports a null-terminated reference to the SBuf internal storage.
* \warning ACCESSING RAW STORAGE IS DANGEROUS! DO NOT EVER USE
* THE RETURNED POINTER FOR WRITING
void checkAccessBounds(size_type pos) const;
+ /** Exports a writable pointer to the SBuf internal storage.
+ * \warning Use with EXTREME caution, this is a dangerous operation.
+ *
+ * Returns a pointer to the first unused byte in the SBuf's storage,
+ * which can be used for appending. At least minSize bytes will
+ * be available for writing.
+ * The returned pointer must not be stored by the caller, as it will
+ * be invalidated by the first call to a non-const method
+ * on the SBuf.
+ * This call guarantees to never return nullptr.
+ * \see reserveSpace
+ * \note Unlike reserveSpace(), this method does not guarantee exclusive
+ * buffer ownership. It is instead optimized for a one writer
+ * (appender), many readers scenario by avoiding unnecessary
+ * copying and allocations.
+ * \throw SBufTooBigException if the user tries to allocate too big a SBuf
+ */
+ char *rawSpace(size_type minSize);
+
/** Low-level append operation
*
* Takes as input a contiguous area of memory and appends its contents
int
Ssl::ServerBio::readAndBuffer(BIO *table)
{
- char *space = rbuf.rawSpace(SQUID_TCP_SO_RCVBUF);
- const int result = Ssl::Bio::read(space, rbuf.spaceSize(), table);
+ char *space = rbuf.rawAppendStart(SQUID_TCP_SO_RCVBUF);
+ const int result = Ssl::Bio::read(space, SQUID_TCP_SO_RCVBUF, table);
if (result <= 0)
return result;
- rbuf.forceSize(rbuf.length() + result);
+ rbuf.rawAppendFinish(space, result);
return result;
}
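
Every caller converted above follows the same three-step pattern: obtain raw space, let the I/O primitive write into it, then commit only what was actually written. A minimal sketch of that pattern against the new API, using a hypothetical helper name and a plain POSIX read(2) rather than code from this patch:

    #include "sbuf/SBuf.h"

    #include <unistd.h>

    // Hypothetical helper: append up to `limit` bytes read from `fd` onto `out`.
    // Returns whatever read(2) returned; `out` grows only on a successful read.
    static ssize_t
    appendFromFd(SBuf &out, const int fd, const SBuf::size_type limit)
    {
        char *space = out.rawAppendStart(limit);   // 1. obtain raw writable space
        const auto got = ::read(fd, space, limit); // 2. let the syscall fill it in place
        if (got > 0)
            out.rawAppendFinish(space, got);       // 3. commit the bytes actually read
        // on EOF (0) or error (-1) nothing is committed and `out` is unchanged
        return got;
    }
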
const SBufStats& SBuf::GetStats() STUB_RETVAL(SBuf::stats)
SBuf::size_type SBuf::copy(char *, size_type) const STUB_RETVAL(0)
const char* SBuf::rawContent() const STUB_RETVAL(NULL)
-char *SBuf::rawSpace(size_type) STUB_RETVAL(NULL)
-void SBuf::forceSize(size_type) STUB
+char *SBuf::rawAppendStart(size_type) STUB_RETVAL(NULL)
+void SBuf::rawAppendFinish(const char *, size_type) STUB
const char* SBuf::c_str() STUB_RETVAL("")
void SBuf::reserveCapacity(size_type) STUB
SBuf::size_type SBuf::reserve(const SBufReservationRequirements &) STUB_RETVAL(0)
{
SBuf s1(literal);
SBuf s2(fox1);
- SBuf::size_type sz=s2.length();
- char *rb=s2.rawSpace(strlen(fox2)+1);
+ char *rb=s2.rawAppendStart(strlen(fox2)+1);
strcpy(rb,fox2);
- s2.forceSize(sz+strlen(fox2));
+ s2.rawAppendFinish(rb, strlen(fox2));
CPPUNIT_ASSERT_EQUAL(s1,s2);
}
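
The Must(bufEnd() == start) guard in rawAppendFinish() is the safety that forceSize() lacked: if anything modifies the SBuf between the start and finish calls, the saved pointer no longer matches the buffer end and the finish call throws instead of silently publishing unrelated bytes. A hypothetical companion check (not part of this patch) illustrating the misuse that is now rejected:

    #include "sbuf/SBuf.h"

    #include <cassert>

    // hypothetical unit check, not part of this patch
    static void
    testStaleAppendPointerIsRejected()
    {
        SBuf buf("GET ");
        char *space = buf.rawAppendStart(32); // plan to append up to 32 raw bytes
        buf.append("http://example.com/ ");   // oops: another append moves bufEnd()
        try {
            buf.rawAppendFinish(space, 0);    // Must(bufEnd() == start) fails here
            assert(false && "stale append pointer was not detected");
        } catch (...) {
            // expected: rawAppendFinish() rejects the stale `space` pointer
        }
    }
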