int sender; ///< kid ID of sending process
/// transients index, so that workers can find [private] entries to sync
- sfileno xitIndex;
+ sfileno xitIndex;
};
// CollapsedForwarding
return;
if (!e.mem_obj || e.mem_obj->xitTable.index < 0 ||
- !Store::Root().transientReaders(e)) {
+ !Store::Root().transientReaders(e)) {
debugs(17, 7, "nobody reads " << e);
return;
}
#ifndef SQUID_COLLAPSED_FORWARDING_H
#define SQUID_COLLAPSED_FORWARDING_H
-#include "ipc/Queue.h"
#include "ipc/forward.h"
+#include "ipc/Queue.h"
#include "typedefs.h"
#include <memory>
}
}
-/// Tries to write buffer to disk (a few times if needed);
+/// Tries to write buffer to disk (a few times if needed);
/// sets ipcIo results, but does no cleanup. The caller must clean up.
static void
diskerWriteAttempts(IpcIoMsg &ipcIo)
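The comment above promises a bounded number of write attempts with no cleanup on failure. A minimal, hypothetical sketch of that retry idea, assuming a plain POSIX write() and an invented writeWithRetries() helper rather than the real IpcIoMsg-based body:

    #include <unistd.h>   // write(), ssize_t
    #include <cerrno>     // errno, EINTR, EAGAIN
    #include <cstddef>    // size_t

    // Keep writing until `len` bytes are out or the attempt budget is spent.
    // Returns bytes written, or -1 on a persistent error; cleanup is left to the caller.
    static ssize_t
    writeWithRetries(int fd, const char *buf, size_t len, int attemptsLeft)
    {
        size_t written = 0;
        while (written < len && attemptsLeft-- > 0) {
            const ssize_t n = write(fd, buf + written, len - written);
            if (n > 0) {
                written += static_cast<size_t>(n); // a short write just costs another attempt
                continue;
            }
            if (n < 0 && errno != EINTR && errno != EAGAIN)
                return -1; // persistent error; no cleanup here
            // interrupted or would-block: retry
        }
        return static_cast<ssize_t>(written);
    }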
}
const char *
-MemObject::storeId() const {
+MemObject::storeId() const
+{
if (!storeId_.size()) {
debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
dump();
}
const char *
-MemObject::logUri() const {
+MemObject::logUri() const
+{
return logUri_.size() ? logUri_.termedBuf() : storeId();
}
bool
-MemObject::hasUris() const {
+MemObject::hasUris() const
+{
return storeId_.size();
}
storeId_ = aStoreId;
// fast pointer comparison for a common storeCreateEntry(url,url,...) case
- if (!aLogUri || aLogUri == aStoreId)
+ if (!aLogUri || aLogUri == aStoreId)
logUri_.clean(); // use storeId_ by default to minimize copying
else
logUri_ = aLogUri;
MemObject::readAheadPolicyCanRead() const
{
const bool canRead = endOffset() - getReply()->hdr_sz <
- lowestMemReaderOffset() + Config.readAheadGap;
+ lowestMemReaderOffset() + Config.readAheadGap;
if (!canRead) {
debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
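To make the read-ahead inequality above concrete (numbers are illustrative only): with getReply()->hdr_sz at 1 KB, the slowest in-memory reader at offset 60 KB, and read_ahead_gap set to 16 KB, canRead stays true only while endOffset() - 1 KB is below 60 KB + 16 KB, i.e. while endOffset() is under 77 KB; past that point reading pauses until the slowest reader advances.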
typedef enum { ioUndecided, ioWriting, ioReading, ioDone } Io;
/// State of an entry with regard to the [shared] in-transit table.
- class XitTable {
+ class XitTable
+ {
public:
XitTable(): index(-1), io(ioUndecided) {}
XitTable xitTable; ///< current [shared] memory caching state for the entry
/// State of an entry with regard to the [shared] memory caching.
- class MemCache {
+ class MemCache
+ {
public:
MemCache(): index(-1), offset(0), io(ioUndecided) {}
int32_t index; ///< entry position inside the memory cache
int64_t offset; ///< bytes written/read to/from the memory cache so far
-
+
Io io; ///< current I/O state
};
MemCache memCache; ///< current [shared] memory caching state for the entry
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
-
MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}
// check compatibility with the disk cache, if any
if (Config.cacheSwap.n_configured > 0) {
- const int64_t diskMaxSize = Store::Root().maxObjectSize();
- const int64_t memMaxSize = maxObjectSize();
- if (diskMaxSize == -1) {
- debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
- "is unlimited but mem-cache maximum object size is " <<
- memMaxSize / 1024.0 << " KB");
- } else if (diskMaxSize > memMaxSize) {
- debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
- "is too large for mem-cache: " <<
- diskMaxSize / 1024.0 << " KB > " <<
- memMaxSize / 1024.0 << " KB");
- }
+ const int64_t diskMaxSize = Store::Root().maxObjectSize();
+ const int64_t memMaxSize = maxObjectSize();
+ if (diskMaxSize == -1) {
+ debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
+ "is unlimited but mem-cache maximum object size is " <<
+ memMaxSize / 1024.0 << " KB");
+ } else if (diskMaxSize > memMaxSize) {
+ debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
+ "is too large for mem-cache: " <<
+ diskMaxSize / 1024.0 << " KB > " <<
+ memMaxSize / 1024.0 << " KB");
+ }
}
freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
MemStore::currentSize() const
{
return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
- Ipc::Mem::PageSize();
+ Ipc::Mem::PageSize();
}
uint64_t
sfileno index;
const Ipc::StoreMapAnchor *const slot = map->openForReading(
- reinterpret_cast<cache_key*>(collapsed.key), index);
+ reinterpret_cast<cache_key*>(collapsed.key), index);
if (!slot)
return false;
{
assert(collapsed.mem_obj);
- const sfileno index = collapsed.mem_obj->memCache.index;
+ const sfileno index = collapsed.mem_obj->memCache.index;
// already disconnected from the cache, no need to update
- if (index < 0)
+ if (index < 0)
return true;
if (!map)
wasEof << " wasSize " << wasSize << " <= " <<
anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
" mem.endOffset " << e.mem_obj->endOffset());
-
- if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
+
+ if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
// size of the slice data that we already copied
const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
assert(prefixSize <= wasSize);
if (wasSize >= slice.size) { // did not grow since we started copying
sliceOffset += wasSize;
sid = slice.next;
- }
+ }
} else if (wasSize >= slice.size) { // did not grow
break;
}
// XXX: have to copy because httpMsgParseStep() requires 0-termination
MemBuf mb;
mb.init(buf.length+1, buf.length+1);
- mb.append(buf.data, buf.length);
+ mb.append(buf.data, buf.length);
mb.terminate();
const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
if (result > 0) {
// not knowing when the wait is over
if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
- return;
+ return;
}
assert(map);
slice.next = lastWritingSlice = reserveSapForWriting(page);
map->extras(lastWritingSlice).page = page;
debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
- }
+ }
- copyToShmSlice(e, anchor);
+ copyToShmSlice(e, anchor);
}
debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
freeSlots->push(slot);
}
}
-
+
// catch free slots delivered to noteFreeMapSlice()
assert(!waitingFor);
waitingFor.slot = &slot;
return;
}
break;
-
+
case MemObject::ioDone:
case MemObject::ioReading:
return; // we should not write in all of the above cases
else
CollapsedForwarding::Broadcast(e);
return;
- }
- catch (const std::exception &x) { // TODO: should we catch ... as well?
+ } catch (const std::exception &x) { // TODO: should we catch ... as well?
debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
// fall through to the error handling code
}
// the entry may have been loaded and then disconnected from the cache
map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
}
-
+
e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}
Must(!spaceOwner);
spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
- entryLimit,
- sizeof(Ipc::Mem::PageId));
+ entryLimit,
+ sizeof(Ipc::Mem::PageId));
Must(!mapOwner);
mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}
sfileno lastWritingSlice;
/// temporary storage for slot and page ID pointers; for the waiting cache
- class SlotAndPage {
+ class SlotAndPage
+ {
public:
SlotAndPage(): slot(NULL), page(NULL) {}
bool operator !() const { return !slot && !page; }
// create a brand new store entry and initialize it with stored info
StoreEntry *e = storeCreatePureEntry(extras.url, extras.url,
- extras.reqFlags, extras.reqMethod);
+ extras.reqFlags, extras.reqMethod);
assert(e->mem_obj);
e->mem_obj->method = extras.reqMethod;
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
- const HttpRequestMethod &reqMethod)
+ const HttpRequestMethod &reqMethod)
{
assert(e);
assert(e->mem_obj);
if (!map) {
debugs(20, 5, "No map to add " << *e);
return;
- }
+ }
sfileno index = 0;
Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
if (!slot) {
debugs(20, 5, "collision registering " << *e);
return;
- }
+ }
try {
if (copyToShm(*e, index, reqFlags, reqMethod)) {
map->startAppending(index);
// keep write lock -- we will be supplying others with updates
return;
- }
+ }
// fall through to the error handling code
- }
- catch (const std::exception &x) { // TODO: should we catch ... as well?
+ } catch (const std::exception &x) { // TODO: should we catch ... as well?
debugs(20, 2, "error keeping entry " << index <<
' ' << *e << ": " << x.what());
// fall through to the error handling code
- }
+ }
map->abortWriting(index);
}
const size_t urlLen = strlen(url);
Must(urlLen < sizeof(extras.url)); // we have space to store it all, plus 0
strncpy(extras.url, url, sizeof(extras.url));
- extras.url[urlLen] = '\0';
+ extras.url[urlLen] = '\0';
extras.reqFlags = reqFlags;
// TODO: every must-revalidate and similar request MUST reach the origin,
// but do we have to prohibit others from collapsing on that request?
if (Config.onoff.collapsed_forwarding && reqFlags.cachable &&
- !reqFlags.needValidation &&
- (m == Http::METHOD_GET || m == Http::METHOD_HEAD)) {
+ !reqFlags.needValidation &&
+ (m == Http::METHOD_GET || m == Http::METHOD_HEAD)) {
// make the entry available for future requests now
Store::Root().allowCollapsing(e, reqFlags, m);
}
break;
case LFT_TIME_START: {
- int precision = fmt->widthMax >=0 ? fmt->widthMax : 3;
+ int precision = fmt->widthMax >=0 ? fmt->widthMax : 3;
snprintf(tmp, sizeof(tmp), "%0*" PRId64 ".%0*d", fmt->zero && (fmt->widthMin - precision - 1 >= 0) ? fmt->widthMin - precision - 1 : 0, (int64_t)al->cache.start_time.tv_sec, precision, (int)(al->cache.start_time.tv_usec / fmt->divisor));
out = tmp;
}
- break;
+ break;
case LFT_TIME_TO_HANDLE_REQUEST:
outint = al->cache.msec;
/* members below are not meaningful if empty() */
/// whether this slot is not corrupted
- bool sane(const size_t slotSize, int slotLimit) const { return
- 0 <= firstSlot && firstSlot < slotLimit &&
- -1 <= nextSlot && nextSlot < slotLimit &&
- version > 0 &&
- 0 < payloadSize && payloadSize <= slotSize - sizeof(DbCellHeader); }
+ bool sane(const size_t slotSize, int slotLimit) const {
+ return
+ 0 <= firstSlot && firstSlot < slotLimit &&
+ -1 <= nextSlot && nextSlot < slotLimit &&
+ version > 0 &&
+ 0 < payloadSize && payloadSize <= slotSize - sizeof(DbCellHeader);
+ }
uint64_t key[2]; ///< StoreEntry key
uint64_t entrySize; ///< total entry content size or zero if still unknown
offset_ = coreOff;
len = min(len,
- static_cast<size_t>(objOffset + currentReadableSlice().size - coreOff));
+ static_cast<size_t>(objOffset + currentReadableSlice().size - coreOff));
const uint64_t diskOffset = dir->diskOffset(sidCurrent);
theFile->read(new ReadRequest(::ReadRequest(buf,
- diskOffset + sizeof(DbCellHeader) + coreOff - objOffset, len), this));
+ diskOffset + sizeof(DbCellHeader) + coreOff - objOffset, len), this));
}
void
callb(cbdata, buf, rlen, this);
}
-
/// wraps tryWrite() to handle deep write failures centrally and safely
bool
Rock::IoState::write(char const *buf, size_t size, off_t coreOff, FREE *dtor)
}
// careful: 'this' might be gone here
-
+
if (dtor)
(dtor)(const_cast<char*>(buf)); // cast due to a broken API?
* Possibly send data to be written to disk:
* We only write data when a full slot has accumulated or when close() is called.
* We buffer, in part, to avoid forcing OS to _read_ old unwritten portions of
- * the slot when the write does not end at the page or sector boundary.
+ * the slot when the write does not end at the page or sector boundary.
*/
void
Rock::IoState::tryWrite(char const *buf, size_t size, off_t coreOff)
WriteRequest *const r = new WriteRequest(
::WriteRequest(static_cast<char*>(wBuf), diskOffset, theBuf.size,
- memFreeBufFunc(wBufCap)), this);
+ memFreeBufFunc(wBufCap)), this);
r->sidCurrent = sidCurrent;
r->sidNext = sidNext;
r->eof = eof;
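The tryWrite() comment above explains the buffering rule: hold data until a whole slot has accumulated (or the stream is closed), so a misaligned tail never forces the OS to read back a partially written page. A standalone sketch of that pattern with an invented SlotBuffer class, not Rock::IoState's actual buffer handling:

    #include <cstddef>  // size_t
    #include <vector>

    class SlotBuffer
    {
    public:
        explicit SlotBuffer(size_t slotSize): capacity(slotSize) { data.reserve(slotSize); } // assumes slotSize > 0

        // append caller bytes, flushing whenever a full slot has accumulated
        void write(const char *buf, size_t size) {
            while (size > 0) {
                const size_t room = capacity - data.size();
                const size_t chunk = size < room ? size : room;
                data.insert(data.end(), buf, buf + chunk);
                buf += chunk;
                size -= chunk;
                if (data.size() == capacity)
                    flush(); // a full slot: hand it to the disk layer
            }
        }

        // on close, write out whatever remains, even if the final slot is short
        void close() { if (!data.empty()) flush(); }

    private:
        void flush() { /* pass `data` to the disk I/O layer here */ data.clear(); }

        std::vector<char> data; // pending bytes not yet sent to disk
        size_t capacity;        // one slot worth of bytes
    };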
void writeToDisk(const SlotId nextSlot);
void writeBufToDisk(const SlotId nextSlot, const bool eof);
SlotId reserveSlotForWriting();
-
+
void callBack(int errflag);
Rock::SwapDir::Pointer dir; ///< swap dir that initiated I/O
* negligible performance impact but saves us from high-damage bugs.
*/
-
-namespace Rock {
+namespace Rock
+{
/// maintains information about the store entry being loaded from disk
/// used for identifying partially stored/loaded entries
-class LoadingEntry {
+class LoadingEntry
+{
public:
LoadingEntry(): size(0), version(0), state(leEmpty), anchored(0),
- mapped(0), freed(0), more(-1) {}
+ mapped(0), freed(0), more(-1) {}
/* store entry-level information indexed by sfileno */
uint64_t size; ///< payload seen so far
} /* namespace Rock */
-
Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
sd(dir),
entries(NULL),
Rock::Rebuild::doneAll() const
{
return dbOffset >= dbSize && validationPos >= dbEntryLimit &&
- AsyncJob::doneAll();
+ AsyncJob::doneAll();
}
void
cache_key key[SQUID_MD5_DIGEST_LENGTH];
StoreEntry loadedE;
const uint64_t knownSize = header.entrySize > 0 ?
- header.entrySize : anchor.basics.swap_file_sz.get();
+ header.entrySize : anchor.basics.swap_file_sz.get();
if (!storeRebuildParseEntry(buf, loadedE, key, counts, knownSize))
return false;
assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
// perhaps we loaded a later slot (with entrySize) earlier
totalSize = anchor.basics.swap_file_sz;
- } else
- if (totalSize && !anchor.basics.swap_file_sz) {
+ } else if (totalSize && !anchor.basics.swap_file_sz) {
anchor.basics.swap_file_sz = totalSize;
assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
- } else
- if (totalSize != anchor.basics.swap_file_sz) {
+ } else if (totalSize != anchor.basics.swap_file_sz) {
le.state = LoadingEntry::leCorrupted;
freeBadEntry(fileno, "size mismatch");
return;
Rock::Rebuild::startNewEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
{
// If some other from-disk entry is/was using this slot as its inode OR
- // if some other from-disk entry is/was using our inode slot, then the
+ // if some other from-disk entry is/was using our inode slot, then the
// entries are conflicting. We cannot identify other entries, so we just
// remove ours and hope that the others were/will be handled correctly.
const LoadingEntry &slice = entries[slotId];
const LoadingEntry &le = entries[fileno];
// any order will work, but do fast comparisons first:
return le.version == header.version &&
- anchor.start == static_cast<Ipc::StoreMapSliceId>(header.firstSlot) &&
- anchor.sameKey(reinterpret_cast<const cache_key*>(header.key));
+ anchor.start == static_cast<Ipc::StoreMapSliceId>(header.firstSlot) &&
+ anchor.sameKey(reinterpret_cast<const cache_key*>(header.key));
}
/// is the new header consistent with information already loaded?
LoadingEntry &le = entries[fileno];
debugs(47,9, "entry " << fileno << " state: " << le.state << ", inode: " <<
- header.firstSlot << ", size: " << header.payloadSize);
+ header.firstSlot << ", size: " << header.payloadSize);
switch (le.state) {
const int64_t Rock::SwapDir::HeaderSize = 16*1024;
-Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
- slotSize(HeaderSize), filePath(NULL), map(NULL), io(NULL),
- waitingForPage(NULL)
+Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
+ slotSize(HeaderSize), filePath(NULL), map(NULL), io(NULL),
+ waitingForPage(NULL)
{
}
sfileno filen;
const Ipc::StoreMapAnchor *const slot = map->openForReading(
- reinterpret_cast<cache_key*>(collapsed.key), filen);
+ reinterpret_cast<cache_key*>(collapsed.key), filen);
if (!slot)
return false;
// especially since we may switch from writing to reading. This code relies
// on Rock::IoState::writeableAnchor_ being set when we locked for writing.
if (e.mem_obj && e.mem_obj->swapout.sio != NULL &&
- dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
+ dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
map->abortWriting(e.swap_filen);
e.swap_dirn = -1;
e.swap_filen = -1;
Rock::SwapDir::currentSize() const
{
const uint64_t spaceSize = !freeSlots ?
- maxSize() : (slotSize * freeSlots->size());
+ maxSize() : (slotSize * freeSlots->size());
// everything that is not free is in use
return maxSize() - spaceSize;
}
// report Rock DB creation error and exit
void
-Rock::SwapDir::createError(const char *const msg) {
+Rock::SwapDir::createError(const char *const msg)
+{
debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
filePath << "; " << msg << " error: " << xstrerror());
fatal("Rock Store db creation error");
map->writeableSlice(sio.swap_filen, request->sidCurrent);
slice.size = request->len - sizeof(DbCellHeader);
slice.next = request->sidNext;
-
+
if (request->eof) {
assert(sio.e);
assert(sio.writeableAnchor_);
sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
- sio.offset_;
+ sio.offset_;
// close, the entry gets the read lock
map->closeForWriting(sio.swap_filen, true);
}
const char *
-Rock::SwapDir::inodeMapPath() const {
+Rock::SwapDir::inodeMapPath() const
+{
static String inodesPath;
inodesPath = path;
inodesPath.append("_inodes");
}
const char *
-Rock::SwapDir::freeSlotsPath() const {
+Rock::SwapDir::freeSlotsPath() const
+{
static String spacesPath;
spacesPath = path;
spacesPath.append("_spaces");
#include "DiskIO/IORequestor.h"
#include "fs/rock/RockDbCell.h"
#include "fs/rock/RockForward.h"
-#include "ipc/StoreMap.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/PageStack.h"
+#include "ipc/StoreMap.h"
#include "SwapDir.h"
class DiskIOStrategy;
return;
const uint64_t expectedSize = sb.st_size > 0 ?
- static_cast<uint64_t>(sb.st_size) : 0;
+ static_cast<uint64_t>(sb.st_size) : 0;
StoreEntry tmpe;
const bool parsed = storeRebuildParseEntry(buf, tmpe, key, counts,
}
Ipc::OneToOneUniQueue &
-Ipc::BaseMultiQueue::inQueue(const int remoteProcessId) {
+Ipc::BaseMultiQueue::inQueue(const int remoteProcessId)
+{
const OneToOneUniQueue &queue =
const_cast<const BaseMultiQueue *>(this)->inQueue(remoteProcessId);
return const_cast<OneToOneUniQueue &>(queue);
}
Ipc::OneToOneUniQueue &
-Ipc::BaseMultiQueue::outQueue(const int remoteProcessId) {
+Ipc::BaseMultiQueue::outQueue(const int remoteProcessId)
+{
const OneToOneUniQueue &queue =
const_cast<const BaseMultiQueue *>(this)->outQueue(remoteProcessId);
return const_cast<OneToOneUniQueue &>(queue);
}
Ipc::QueueReader &
-Ipc::BaseMultiQueue::localReader() {
+Ipc::BaseMultiQueue::localReader()
+{
const QueueReader &reader =
const_cast<const BaseMultiQueue *>(this)->localReader();
return const_cast<QueueReader &>(reader);
}
Ipc::QueueReader &
-Ipc::BaseMultiQueue::remoteReader(const int remoteProcessId) {
+Ipc::BaseMultiQueue::remoteReader(const int remoteProcessId)
+{
const QueueReader &reader =
const_cast<const BaseMultiQueue *>(this)->remoteReader(remoteProcessId);
return const_cast<QueueReader &>(reader);
Ipc::FewToFewBiQueue::remotesCount() const
{
return theLocalGroup == groupA ? metadata->theGroupBSize :
- metadata->theGroupASize;
+ metadata->theGroupASize;
}
int
Ipc::FewToFewBiQueue::remotesIdOffset() const
{
return theLocalGroup == groupA ? metadata->theGroupBIdOffset :
- metadata->theGroupAIdOffset;
+ metadata->theGroupAIdOffset;
}
Ipc::FewToFewBiQueue::Metadata::Metadata(const int aGroupASize, const int aGroupAIdOffset, const int aGroupBSize, const int aGroupBIdOffset):
Ipc::MultiQueue::validProcessId(const int processId) const
{
return metadata->theProcessIdOffset <= processId &&
- processId < metadata->theProcessIdOffset + metadata->theProcessCount;
+ processId < metadata->theProcessIdOffset + metadata->theProcessCount;
}
const Ipc::OneToOneUniQueue &
if (const time_t diff = newVersion - inode.basics.timestamp)
return diff < 0 ? -1 : +1;
-
+
return 0;
}
s.waitingToBeFreed = true;
s.lock.unlockExclusive();
debugs(54, 5, "closed dirty entry " << fileno << " for writing " << path);
- }
+ }
}
const Ipc::StoreMap::Anchor *
if (s.sameKey(key))
freeChain(idx, s, true);
s.lock.unlockExclusive();
- } else if (s.lock.lockShared()) {
+ } else if (s.lock.lockShared()) {
if (s.sameKey(key))
s.waitingToBeFreed = true; // mark to free it later
s.lock.unlockShared();
} else {
- // we cannot be sure that the entry we found is ours because we do not
- // have a lock on it, but we still check to minimize false deletions
- if (s.sameKey(key))
- s.waitingToBeFreed = true; // mark to free it later
+ // we cannot be sure that the entry we found is ours because we do not
+ // have a lock on it, but we still check to minimize false deletions
+ if (s.sameKey(key))
+ s.waitingToBeFreed = true; // mark to free it later
}
}
return shared->slots[anchorIndexByKey(key)].anchor;
}
-
/* Ipc::StoreMapAnchor */
Ipc::StoreMapAnchor::StoreMapAnchor(): start(0)
Atomic::WordT<StoreMapSliceId> next; ///< ID of the next entry slice
};
-
/// Maintains shareable information about a StoreEntry as a whole.
/// An anchor points to one or more StoreEntry slices. This is the
/// only lockable part of shared StoreEntry information, providing
} basics;
/// where the chain of StoreEntry slices begins [app]
- Atomic::WordT<StoreMapSliceId> start;
+ Atomic::WordT<StoreMapSliceId> start;
#if 0
/// possible persistent states
/// A hack to allocate one shared array for both anchors and slices.
/// Anchors are indexed by store entry ID and are independent from each other.
/// Slices are indexed by slice IDs and form entry chains using slice.next.
-class StoreMapSlot {
+class StoreMapSlot
+{
public:
StoreMapAnchor anchor; ///< information about store entry as a whole
StoreMapSlice slice; ///< information about one stored entry piece
}
void
-StoreEntry::touch() {
+StoreEntry::touch()
+{
lastref = squid_curtime;
Store::Root().reference(*this);
}
// print only non-default status values, using unique letters
if (e.mem_status != NOT_IN_MEMORY ||
- e.store_status != STORE_PENDING ||
- e.swap_status != SWAPOUT_NONE ||
- e.ping_status != PING_NONE) {
+ e.store_status != STORE_PENDING ||
+ e.swap_status != SWAPOUT_NONE ||
+ e.ping_status != PING_NONE) {
if (e.mem_status != NOT_IN_MEMORY) os << 'm';
if (e.store_status != STORE_PENDING) os << 's';
if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
// scheduleRead calls scheduleDiskRead which asserts on STORE_MEM_CLIENTs.
const MemObject *mem = entry->mem_obj;
return mem &&
- mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
+ mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
}
static void
{
if (StoreEntry *e = find(key)) {
// this is not very precise: some get()s are not initiated by clients
- e->touch();
+ e->touch();
return e;
}
return NULL;
StoreController::transientReaders(const StoreEntry &e) const
{
return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
- transients->readers(e) : 0;
+ transients->readers(e) : 0;
}
void
if (transients)
transients->startWriting(e, reqFlags, reqMethod);
debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
- "SMP-" : "locally-") << "collapse " << *e);
+ "SMP-" : "locally-") << "collapse " << *e);
}
void
// Quit if write() fails. Sio is going to call our callback, and that
// will clean up, but, depending on the fs, that call may be async.
const bool ok = mem->swapout.sio->write(
- mem->data_hdr.NodeGet(page),
- swap_buf_len,
- -1,
- memNodeWriteComplete);
+ mem->data_hdr.NodeGet(page),
+ swap_buf_len,
+ -1,
+ memNodeWriteComplete);
if (!ok || anEntry->swap_status != SWAPOUT_WRITING)
return false;
#include "squid.h"
#include "MemBuf.h"
-#include "SwapDir.h"
#include "store_rebuild.h"
+#include "SwapDir.h"
#if HAVE_STRING_H
#include <string.h>
#endif