extern "C" {
-inline int
-shm_open(const char *, int, mode_t) {
- errno = ENOTSUP;
- return -1;
-}
-
-inline int
-shm_unlink(const char *) {
- errno = ENOTSUP;
- return -1;
-}
+ inline int
+ shm_open(const char *, int, mode_t) {
+ errno = ENOTSUP;
+ return -1;
+ }
+
+ inline int
+ shm_unlink(const char *) {
+ errno = ENOTSUP;
+ return -1;
+ }
} /* extern "C" */
/// IpcIo wrapper for debugs() streams; XXX: find a better class name
struct SipcIo {
SipcIo(int aWorker, const IpcIoMsg &aMsg, int aDisker):
- worker(aWorker), msg(aMsg), disker(aDisker) {}
+ worker(aWorker), msg(aMsg), disker(aDisker) {}
int worker;
const IpcIoMsg &msg;
int disker;
};
std::ostream &
operator <<(std::ostream &os, const SipcIo &sio)
{
return os << "ipcIo" << sio.worker << '.' << sio.msg.requestId <<
- (sio.msg.command == IpcIo::cmdRead ? 'r' : 'w') << sio.disker;
+ (sio.msg.command == IpcIo::cmdRead ? 'r' : 'w') << sio.disker;
}
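// Typical use, as seen later in this patch: stream the wrapper into debugs()
// to get a compact "ipcIo<worker>.<requestId><r|w><disker>" tag, e.g.:
//   debugs(47, 7, HERE << "popped disker response: " <<
//          SipcIo(KidIdentifier, ipcIo, diskId));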
IpcIoFile::IpcIoFile(char const *aDb):
- dbName(aDb), diskId(-1), error_(false), lastRequestId(0),
- olderRequests(&requestMap1), newerRequests(&requestMap2),
- timeoutCheckScheduled(false)
+ dbName(aDb), diskId(-1), error_(false), lastRequestId(0),
+ olderRequests(&requestMap1), newerRequests(&requestMap2),
+ timeoutCheckScheduled(false)
{
}
}
void
-IpcIoFile::openCompleted(const Ipc::StrandSearchResponse *const response) {
+IpcIoFile::openCompleted(const Ipc::StrandSearchResponse *const response)
+{
Must(diskId < 0); // we do not know our disker yet
if (!response) {
IpcIoFile::read(ReadRequest *readRequest)
{
debugs(79,3, HERE << "(disker" << diskId << ", " << readRequest->len << ", " <<
- readRequest->offset << ")");
+ readRequest->offset << ")");
assert(ioRequestor != NULL);
assert(readRequest->len >= 0);
if (response->xerrno) {
debugs(79,1, HERE << "error: " << xstrerr(response->xerrno));
ioError = error_ = true;
- }
- else
- if (!response->page) {
+ } else if (!response->page) {
debugs(79,1, HERE << "error: run out of shared memory pages");
ioError = true;
} else {
}
const ssize_t rlen = ioError ? -1 : (ssize_t)readRequest->len;
- const int errflag = ioError ? DISK_ERROR : DISK_OK;
+ const int errflag = ioError ? DISK_ERROR : DISK_OK;
ioRequestor->readCompleted(readRequest->buf, rlen, errflag, readRequest);
}
IpcIoFile::write(WriteRequest *writeRequest)
{
debugs(79,3, HERE << "(disker" << diskId << ", " << writeRequest->len << ", " <<
- writeRequest->offset << ")");
+ writeRequest->offset << ")");
assert(ioRequestor != NULL);
assert(writeRequest->len >= 0);
if (!response) {
debugs(79, 3, HERE << "error: timeout");
ioError = true; // I/O timeout does not warrant setting error_?
- } else
- if (response->xerrno) {
+ } else if (response->xerrno) {
debugs(79,1, HERE << "error: " << xstrerr(response->xerrno));
ioError = error_ = true;
- } else
- if (response->len != writeRequest->len) {
+ } else if (response->len != writeRequest->len) {
debugs(79,1, HERE << "problem: " << response->len << " < " << writeRequest->len);
error_ = true;
}
if (!ioError) {
debugs(79,5, HERE << "wrote " << writeRequest->len << " to disker" <<
- diskId << " at " << writeRequest->offset);
- }
+ diskId << " at " << writeRequest->offset);
+ }
const ssize_t rlen = ioError ? 0 : (ssize_t)writeRequest->len;
- const int errflag = ioError ? DISK_ERROR : DISK_OK;
+ const int errflag = ioError ? DISK_ERROR : DISK_OK;
ioRequestor->writeCompleted(errflag, rlen, writeRequest);
}
debugs(47, DBG_IMPORTANT, "Worker I/O push queue overflow: " <<
SipcIo(KidIdentifier, ipcIo, diskId)); // TODO: report queue len
// TODO: grow queue size
-
+
pending->completeIo(NULL);
delete pending;
} catch (const TextException &e) {
/// whether we think there is enough time to complete the I/O
bool
-IpcIoFile::canWait() const {
+IpcIoFile::canWait() const
+{
if (!Config.Timeout.disk_io)
return true; // no timeout specified
const int expectedWait = tvSubMsec(oldestIo.start, current_time);
if (expectedWait < 0 ||
- static_cast<time_msec_t>(expectedWait) < Config.Timeout.disk_io)
+ static_cast<time_msec_t>(expectedWait) < Config.Timeout.disk_io)
return true; // expected wait time is acceptable
debugs(47,2, HERE << "cannot wait: " << expectedWait <<
{
debugs(47, 7, HERE << "coordinator response to open request");
for (IpcIoFileList::iterator i = WaitingForOpen.begin();
- i != WaitingForOpen.end(); ++i) {
+ i != WaitingForOpen.end(); ++i) {
if (response.strand.tag == (*i)->dbName) {
(*i)->openCompleted(&response);
WaitingForOpen.erase(i);
{
const int requestId = ipcIo.requestId;
debugs(47, 7, HERE << "popped disker response: " <<
- SipcIo(KidIdentifier, ipcIo, diskId));
+ SipcIo(KidIdentifier, ipcIo, diskId));
Must(requestId);
if (IpcIoPendingRequest *const pending = dequeueRequest(requestId)) {
const IpcIoFile *const ipcIoFile =
reinterpret_cast<const IpcIoFile *>(param);
for (IpcIoFileList::iterator i = WaitingForOpen.begin();
- i != WaitingForOpen.end(); ++i) {
+ i != WaitingForOpen.end(); ++i) {
if (*i == ipcIoFile) {
(*i)->openCompleted(NULL);
WaitingForOpen.erase(i);
/* IpcIoMsg */
IpcIoMsg::IpcIoMsg():
- requestId(0), offset(0), len(0), command(IpcIo::cmdNone), xerrno(0)
+ requestId(0), offset(0), len(0), command(IpcIo::cmdNone), xerrno(0)
{
start.tv_sec = 0;
}
/* IpcIoPendingRequest */
IpcIoPendingRequest::IpcIoPendingRequest(const IpcIoFile::Pointer &aFile):
- file(aFile), readRequest(NULL), writeRequest(NULL)
+ file(aFile), readRequest(NULL), writeRequest(NULL)
{
Must(file != NULL);
if (++file->lastRequestId == 0) // don't use zero value as requestId
{
if (readRequest)
file->readCompleted(readRequest, response);
- else
- if (writeRequest)
+ else if (writeRequest)
file->writeCompleted(writeRequest, response);
else {
Must(!response); // only timeouts are handled here
ipcIo.xerrno = 0;
const size_t len = static_cast<size_t>(read); // safe because read > 0
debugs(47,8, HERE << "disker" << KidIdentifier << " read " <<
- (len == ipcIo.len ? "all " : "just ") << read);
+ (len == ipcIo.len ? "all " : "just ") << read);
ipcIo.len = len;
} else {
ipcIo.xerrno = errno;
ipcIo.len = 0;
debugs(47,5, HERE << "disker" << KidIdentifier << " read error: " <<
- ipcIo.xerrno);
+ ipcIo.xerrno);
}
}
ipcIo.xerrno = 0;
const size_t len = static_cast<size_t>(wrote); // safe because wrote > 0
debugs(47,8, HERE << "disker" << KidIdentifier << " wrote " <<
- (len == ipcIo.len ? "all " : "just ") << wrote);
+ (len == ipcIo.len ? "all " : "just ") << wrote);
ipcIo.len = len;
} else {
ipcIo.xerrno = errno;
// the gap must be positive for select(2) to be given a chance
const double minBreakSecs = 0.001;
eventAdd("IpcIoFile::DiskerHandleMoreRequests",
- &IpcIoFile::DiskerHandleMoreRequests,
- NULL, minBreakSecs, 0, false);
+ &IpcIoFile::DiskerHandleMoreRequests,
+ NULL, minBreakSecs, 0, false);
DiskerHandleMoreRequestsScheduled = true;
}
debugs(47, 3, HERE << "pausing after " << popped << " I/Os in " <<
- elapsedMsec << "ms; " << (elapsedMsec/popped) << "ms per I/O");
+ elapsedMsec << "ms; " << (elapsedMsec/popped) << "ms per I/O");
break;
}
}
#include <map>
#include <memory>
-namespace Ipc {
+namespace Ipc
+{
class FewToFewBiQueue;
} // Ipc
// TODO: expand to all classes
-namespace IpcIo {
+namespace IpcIo
+{
/// what kind of I/O the disker needs to do or have done
typedef enum { cmdNone, cmdOpen, cmdRead, cmdWrite } Command;
/// converts DiskIO requests to IPC queue messages
-class IpcIoMsg {
+class IpcIoMsg
+{
public:
IpcIoMsg();
* DEBUG: section 47 Store Directory Routines
*/
-#include "IpcIoIOStrategy.h"
+#include "config.h"
#include "IpcIoFile.h"
+#include "IpcIoIOStrategy.h"
+
bool
IpcIoIOStrategy::shedLoad()
{
* DEBUG: section 47 Store Directory Routines
*/
-#include "DiskIO/Mmapped/MmappedFile.h"
-#include <sys/mman.h>
+#include "config.h"
#include "DiskIO/IORequestor.h"
+#include "DiskIO/Mmapped/MmappedFile.h"
#include "DiskIO/ReadRequest.h"
#include "DiskIO/WriteRequest.h"
+#include <sys/mman.h>
CBDATA_CLASS_INIT(MmappedFile);
// helper class to deal with mmap(2) offset alignment and other low-level specs
-class Mmapping {
+class Mmapping
+{
public:
Mmapping(int fd, size_t length, int prot, int flags, off_t offset);
~Mmapping();
}
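// A hedged sketch of the alignment work Mmapping hides (assuming the usual
// POSIX requirement that mmap(2) file offsets be page-aligned):
//   delta = offset % page_size;                     // misalignment of offset
//   buf = mmap(NULL, length + delta, prot, flags, fd, offset - delta);
//   data = static_cast<char*>(buf) + delta;         // the caller's view
// The "length+delta" and "offset-delta" pairs in the debugs() output below
// reflect this adjustment.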
MmappedFile::MmappedFile(char const *aPath): fd(-1),
- minOffset(0), maxOffset(-1), error_(false)
+ minOffset(0), maxOffset(-1), error_(false)
{
assert(aPath);
path_ = xstrdup(aPath);
if (fd < 0) {
debugs(79,3, HERE << "open error: " << xstrerror());
error_ = true;
- } else {
+ } else {
store_open_disk_fd++;
debugs(79,3, HERE << "FD " << fd);
struct stat sb;
if (fstat(fd, &sb) == 0)
maxOffset = sb.st_size; // we do not expect it to change
- }
+ }
callback->ioCompletedNotification();
}
file_close(fd);
fd = -1;
store_open_disk_fd--;
- }
+ }
}
void
MmappedFile::read(ReadRequest *aRequest)
{
debugs(79,3, HERE << "(FD " << fd << ", " << aRequest->len << ", " <<
- aRequest->offset << ")");
+ aRequest->offset << ")");
assert(fd >= 0);
assert(ioRequestor != NULL);
assert(maxOffset < 0 || static_cast<uint64_t>(aRequest->offset + aRequest->len) <= static_cast<uint64_t>(maxOffset));
Mmapping mapping(fd, aRequest->len, PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
- aRequest->offset);
+ aRequest->offset);
bool done = false;
- if (void *buf = mapping.map()) {
+ if (void *buf = mapping.map()) {
memcpy(aRequest->buf, buf, aRequest->len);
done = mapping.unmap();
- }
+ }
error_ = !done;
const ssize_t rlen = error_ ? -1 : (ssize_t)aRequest->len;
- const int errflag = error_ ? DISK_ERROR : DISK_OK;
+ const int errflag = error_ ? DISK_ERROR : DISK_OK;
ioRequestor->readCompleted(aRequest->buf, rlen, errflag, aRequest);
}
MmappedFile::write(WriteRequest *aRequest)
{
debugs(79,3, HERE << "(FD " << fd << ", " << aRequest->len << ", " <<
- aRequest->offset << ")");
+ aRequest->offset << ")");
assert(fd >= 0);
assert(ioRequestor != NULL);
if (written < 0) {
debugs(79,1, HERE << "error: " << xstrerr(errno));
error_ = true;
- } else
- if (static_cast<size_t>(written) != aRequest->len) {
+ } else if (static_cast<size_t>(written) != aRequest->len) {
debugs(79,1, HERE << "problem: " << written << " < " << aRequest->len);
error_ = true;
}
debugs(79,5, HERE << "wrote " << aRequest->len << " to FD " << fd << " at " << aRequest->offset);
} else {
doClose();
- }
+ }
const ssize_t rlen = error_ ? 0 : (ssize_t)aRequest->len;
- const int errflag = error_ ? DISK_ERROR : DISK_OK;
+ const int errflag = error_ ? DISK_ERROR : DISK_OK;
ioRequestor->writeCompleted(errflag, rlen, aRequest);
}
}
Mmapping::Mmapping(int aFd, size_t aLength, int aProt, int aFlags, off_t anOffset):
- fd(aFd), length(aLength), prot(aProt), flags(aFlags), offset(anOffset),
- delta(-1), buf(NULL)
+ fd(aFd), length(aLength), prot(aProt), flags(aFlags), offset(anOffset),
+ delta(-1), buf(NULL)
{
}
if (buf == MAP_FAILED) {
const int errNo = errno;
debugs(79,3, HERE << "error FD " << fd << "mmap(" << length << '+' <<
- delta << ", " << offset << '-' << delta << "): " << xstrerr(errNo));
+ delta << ", " << offset << '-' << delta << "): " << xstrerr(errNo));
buf = NULL;
return NULL;
}
Mmapping::unmap()
{
debugs(79,9, HERE << "FD " << fd <<
- " munmap(" << buf << ", " << length << '+' << delta << ')');
+ " munmap(" << buf << ", " << length << '+' << delta << ')');
if (!buf) // forgot or failed to map
return false;
if (error) {
const int errNo = errno;
debugs(79,3, HERE << "error FD " << fd <<
- " munmap(" << buf << ", " << length << '+' << delta << "): " <<
- "): " << xstrerr(errNo));
+ " munmap(" << buf << ", " << length << '+' << delta << "): " <<
+ "): " << xstrerr(errNo));
}
buf = NULL;
return !error;
* DEBUG: section 47 Store Directory Routines
*/
-#include "MmappedIOStrategy.h"
+#include "config.h"
#include "MmappedFile.h"
+#include "MmappedIOStrategy.h"
+
bool
MmappedIOStrategy::shedLoad()
{
}
int64_t
-MemObject::expectedReplySize() const {
+MemObject::expectedReplySize() const
+{
debugs(20, 7, HERE << "object_sz: " << object_sz);
if (object_sz >= 0) // complete() has been called; we know the exact answer
return object_sz;
}
void
-MemStore::init() {
+MemStore::init()
+{
const int64_t entryLimit = EntryLimit();
if (entryLimit <= 0)
return; // no memory cache configured or a misconfiguration
storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
if (limit > 0) {
storeAppendPrintf(&e, "Current entries: %"PRId64" %.2f%%\n",
- currentCount(), (100.0 * currentCount() / limit));
+ currentCount(), (100.0 * currentCount() / limit));
if (limit < 100) { // XXX: otherwise too expensive to count
Ipc::ReadWriteLockStats stats;
map->updateStats(stats);
stats.dump(e);
- }
- }
- }
+ }
+ }
+ }
}
void
const Ipc::Mem::PageId &page = extras.page;
StoreIOBuffer sourceBuf(extras.storedSize, 0,
- static_cast<char*>(PagePointer(page)));
+ static_cast<char*>(PagePointer(page)));
// XXX: We do not know the URLs yet, only the key, but we need to parse and
// store the response for the Root().get() callers to be happy because they
StoreIOBuffer sharedSpace(bufSize, 0,
static_cast<char*>(PagePointer(page)));
-
+
// check that we kept everything or purge incomplete/sparse cached entry
const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
if (eSize != copied) {
// decide whether to use a shared memory cache if the user did not specify
if (!Config.memShared.configured()) {
Config.memShared.configure(AtomicOperationsSupported &&
- Ipc::Mem::Segment::Enabled() && UsingSmp() &&
- Config.memMaxSize > 0);
- } else
- if (Config.memShared && !AtomicOperationsSupported) {
+ Ipc::Mem::Segment::Enabled() && UsingSmp() &&
+ Config.memMaxSize > 0);
+ } else if (Config.memShared && !AtomicOperationsSupported) {
// bail if the user wants shared memory cache but we cannot support it
fatal("memory_cache_shared is on, but no support for atomic operations detected");
- } else
- if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
+ } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
fatal("memory_cache_shared is on, but no support for shared memory detected");
}
/// Stores HTTP entities in RAM. Current implementation uses shared memory.
/// Unlike a disk store (SwapDir), operations are synchronous (and fast).
-class MemStore: public Store, public Ipc::StoreMapCleaner {
+class MemStore: public Store, public Ipc::StoreMapCleaner
+{
public:
MemStore();
virtual ~MemStore();
virtual void write(char const *buf, size_t size, off_t offset, FREE * free_func) = 0;
typedef enum {
- wroteAll, ///< success: caller supplied all data it wanted to swap out
- writerGone, ///< failure: caller left before swapping out everything
- readerDone ///< success or failure: either way, stop swapping in
+ wroteAll, ///< success: caller supplied all data it wanted to swap out
+ writerGone, ///< failure: caller left before swapping out everything
+ readerDone ///< success or failure: either way, stop swapping in
} CloseHow;
virtual void close(int how) = 0; ///< finish or abort swapping per CloseHow
* squid.conf and deactivate them before exiting.
*
* A module in this context is code providing a functionality or service to the
- * rest of Squid, such as src/DiskIO/Blocking, src/fs/ufs, or Cache Manager. A
+ * rest of Squid, such as src/DiskIO/Blocking, src/fs/ufs, or Cache Manager. A
* module must declare a RegisteredRunner child class to implement activation and
* deactivation logic using the run() method and destructor, respectively.
*
* This API allows the registry to determine the right [de]activation time for
* each group of similar modules, without knowing any module specifics.
- *
+ *
*/
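/*
 * A minimal registrant sketch (hypothetical module; the run() signature is an
 * assumption, as this patch does not show it):
 *
 *   class MyModuleRr: public RegisteredRunner {
 *   public:
 *       virtual void run(const RunnerRegistry &) { ... activate module ... }
 *       virtual ~MyModuleRr() { ... deactivate module ... }
 *   };
 */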
/// well-known registries (currently, deactivation is not performed for these)
} RunnerRegistry;
/// a runnable registrant API
-class RegisteredRunner {
+class RegisteredRunner
+{
public:
// called when this runner's registry is deactivated
virtual ~RegisteredRunner() {}
ufs/ufscommon.h
librock_la_SOURCES = \
- rock/RockCommon.cc \
- rock/RockCommon.h \
- rock/RockFile.cc \
rock/RockFile.h \
rock/RockIoState.cc \
rock/RockIoState.h \
rb->counts.objcount++;
e = rb->sd->addDiskRestore(s.key,
- s.swap_filen,
- s.swap_file_sz,
- s.expires,
- s.timestamp,
- s.lastref,
- s.lastmod,
- s.refcount,
- s.flags,
- (int) rb->flags.clean);
+ s.swap_filen,
+ s.swap_file_sz,
+ s.expires,
+ s.timestamp,
+ s.lastref,
+ s.lastmod,
+ s.refcount,
+ s.flags,
+ (int) rb->flags.clean);
storeDirSwapLog(e, SWAP_LOG_ADD);
}
* use to rebuild store from disk. */
StoreEntry *
CossSwapDir::addDiskRestore(const cache_key *const key,
- int file_number,
- uint64_t swap_file_sz,
- time_t expires,
- time_t timestamp,
- time_t lastref,
- time_t lastmod,
- uint32_t refcount,
- uint16_t flags,
- int clean)
+ int file_number,
+ uint64_t swap_file_sz,
+ time_t expires,
+ time_t timestamp,
+ time_t lastref,
+ time_t lastmod,
+ uint32_t refcount,
+ uint16_t flags,
+ int clean)
{
StoreEntry *e = NULL;
debugs(47, 5, "storeCossAddDiskRestore: " << storeKeyText(key) <<
// XXX: rename to fs/rock/RockDbCell.{cc,h}
-namespace Rock {
+namespace Rock
+{
/// \ingroup Rock
/// meta-information at the beginning of every db cell
* DEBUG: section 79 Disk IO Routines
*/
+#include "config.h"
#include "fs/rock/RockIoRequests.h"
CBDATA_NAMESPACED_CLASS_INIT(Rock, ReadRequest);
CBDATA_NAMESPACED_CLASS_INIT(Rock, WriteRequest);
Rock::ReadRequest::ReadRequest(const ::ReadRequest &base,
- const IoState::Pointer &anSio):
- ::ReadRequest(base),
- sio(anSio)
+ const IoState::Pointer &anSio):
+ ::ReadRequest(base),
+ sio(anSio)
{
}
Rock::WriteRequest::WriteRequest(const ::WriteRequest &base,
- const IoState::Pointer &anSio):
- ::WriteRequest(base),
- sio(anSio)
+ const IoState::Pointer &anSio):
+ ::WriteRequest(base),
+ sio(anSio)
{
}
class DiskFile;
-namespace Rock {
+namespace Rock
+{
/// \ingroup Rock
class ReadRequest: public ::ReadRequest
#include "fs/rock/RockSwapDir.h"
Rock::IoState::IoState(SwapDir *dir,
- StoreEntry *anEntry,
- StoreIOState::STFNCB *cbFile,
- StoreIOState::STIOCB *cbIo,
- void *data):
- slotSize(0),
- diskOffset(-1),
- payloadEnd(-1)
+ StoreEntry *anEntry,
+ StoreIOState::STFNCB *cbFile,
+ StoreIOState::STIOCB *cbIo,
+ void *data):
+ slotSize(0),
+ diskOffset(-1),
+ payloadEnd(-1)
{
e = anEntry;
// swap_filen, swap_dirn, diskOffset, and payloadEnd are set by the caller
// we skip our cell header; it is only read when building the map
const int64_t cellOffset = sizeof(DbCellHeader) +
- static_cast<int64_t>(coreOff);
+ static_cast<int64_t>(coreOff);
assert(cellOffset <= payloadEnd);
// Core specifies buffer length, but we must not exceed stored entry size
read.callback_data = cbdataReference(data);
theFile->read(new ReadRequest(
- ::ReadRequest(buf, diskOffset + cellOffset, len), this));
+ ::ReadRequest(buf, diskOffset + cellOffset, len), this));
}
// We only buffer data here; we actually write when close() is called.
// to avoid triggering read-page;new_head+old_tail;write-page overheads
debugs(79, 5, HERE << swap_filen << " at " << diskOffset << '+' <<
- theBuf.contentSize());
+ theBuf.contentSize());
assert(theBuf.contentSize() <= slotSize);
// theFile->write may call writeCompleted immediately
theFile->write(new WriteRequest(::WriteRequest(theBuf.content(),
- diskOffset, theBuf.contentSize(), theBuf.freeFunc()), this));
+ diskOffset, theBuf.contentSize(), theBuf.freeFunc()), this));
}
-//
+//
void
Rock::IoState::finishedWriting(const int errFlag)
{
Rock::IoState::close(int how)
{
debugs(79, 3, HERE << swap_filen << " accumulated: " << offset_ <<
- " how=" << how);
+ " how=" << how);
if (how == wroteAll && !theBuf.isNull())
startWriting();
else
callBack(how == writerGone ? DISK_ERROR : 0); // TODO: add DISK_CALLER_GONE
}
-/// close callback (STIOCB) dialer: breaks dependencies and
+/// close callback (STIOCB) dialer: breaks dependencies and
/// counts IOState concurrency level
-class StoreIOStateCb: public CallDialer
+class StoreIOStateCb: public CallDialer
{
public:
StoreIOStateCb(StoreIOState::STIOCB *cb, void *data, int err, const Rock::IoState::Pointer &anSio):
- callback(NULL),
- callback_data(NULL),
- errflag(err),
- sio(anSio) {
+ callback(NULL),
+ callback_data(NULL),
+ errflag(err),
+ sio(anSio) {
callback = cb;
callback_data = cbdataReference(data);
}
StoreIOStateCb(const StoreIOStateCb &cb):
- callback(NULL),
- callback_data(NULL),
- errflag(cb.errflag),
- sio(cb.sio) {
+ callback(NULL),
+ callback_data(NULL),
+ errflag(cb.errflag),
+ sio(cb.sio) {
callback = cb.callback;
callback_data = cbdataReference(cb.callback_data);
theFile = NULL;
AsyncCall::Pointer call = asyncCall(79,3, "SomeIoStateCloseCb",
- StoreIOStateCb(callback, callback_data, errflag, this));
+ StoreIOStateCb(callback, callback_data, errflag, this));
ScheduleCallHere(call);
callback = NULL;
class DiskFile;
-namespace Rock {
+namespace Rock
+{
class SwapDir;
CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);
Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
- sd(dir),
- dbSize(0),
- dbEntrySize(0),
- dbEntryLimit(0),
- fd(-1),
- dbOffset(0),
- filen(0)
+ sd(dir),
+ dbSize(0),
+ dbEntrySize(0),
+ dbEntryLimit(0),
+ fd(-1),
+ dbOffset(0),
+ filen(0)
{
assert(sd);
memset(&counts, 0, sizeof(counts));
/// prepares and initiates entry loading sequence
void
-Rock::Rebuild::start() {
+Rock::Rebuild::start()
+{
// in SMP mode, only the disker is responsible for populating the map
if (UsingSmp() && !IamDiskProcess()) {
debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
- sd->index << " from " << sd->filePath);
+ sd->index << " from " << sd->filePath);
mustStop("non-disker");
return;
}
failure("cannot read db header", errno);
dbOffset = SwapDir::HeaderSize;
- filen = 0;
+ filen = 0;
checkpoint();
}
}
void
-Rock::Rebuild::steps() {
+Rock::Rebuild::steps()
+{
debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
- dbOffset << " <= " << dbSize);
+ dbOffset << " <= " << dbSize);
// Balance our desire to maximize the number of entries processed at once
- // (and, hence, minimize overheads and total rebuild time) with a
+ // (and, hence, minimize overheads and total rebuild time) with a
// requirement to also process Coordinator events, disk I/Os, etc.
const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
const timeval loopStart = current_time;
const double elapsedMsec = tvSubMsec(loopStart, current_time);
if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
- elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
+ elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
break;
}
}
}
void
-Rock::Rebuild::doOneEntry() {
+Rock::Rebuild::doOneEntry()
+{
debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
- dbOffset << " <= " << dbSize);
+ dbOffset << " <= " << dbSize);
++counts.scancount;
DbCellHeader header;
if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
- "Ignoring truncated cache entry meta data at " << dbOffset);
+ "Ignoring truncated cache entry meta data at " << dbOffset);
counts.invalid++;
return;
}
if (!header.sane()) {
debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
- "Ignoring malformed cache entry meta data at " << dbOffset);
+ "Ignoring malformed cache entry meta data at " << dbOffset);
counts.invalid++;
return;
}
//sd->unlink(filen); leave garbage on disk, it should not hurt
}
return;
- }
+ }
assert(loadedE.swap_filen < dbEntryLimit);
if (!storeRebuildKeepEntry(loadedE, key, counts))
}
void
-Rock::Rebuild::swanSong() {
+Rock::Rebuild::swanSong()
+{
debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
- StoreController::store_dirs_rebuilding);
+ StoreController::store_dirs_rebuilding);
--StoreController::store_dirs_rebuilding;
storeRebuildComplete(&counts);
}
void
-Rock::Rebuild::failure(const char *msg, int errNo) {
+Rock::Rebuild::failure(const char *msg, int errNo)
+{
debugs(47,5, HERE << sd->index << " filen " << filen << " at " <<
- dbOffset << " <= " << dbSize);
+ dbOffset << " <= " << dbSize);
if (errNo)
debugs(47,0, "Rock cache_dir rebuild failure: " << xstrerr(errNo));
assert(sd);
fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
- sd->index, sd->filePath, msg);
+ sd->index, sd->filePath, msg);
}
#ifndef SQUID_FS_ROCK_REBUILD_H
#define SQUID_FS_ROCK_REBUILD_H
-#include "config.h"
#include "base/AsyncJob.h"
#include "structs.h"
-namespace Rock {
+namespace Rock
+{
class SwapDir;
/// \ingroup Rock
/// manages store rebuild process: loading meta information from db on disk
-class Rebuild: public AsyncJob {
+class Rebuild: public AsyncJob
+{
public:
Rebuild(SwapDir *dir);
~Rebuild();
* DEBUG: section 92 Storage File System
*/
+#include "config.h"
#include "fs/rock/RockStoreFileSystem.h"
#include "fs/rock/RockSwapDir.h"
#include "StoreFileSystem.h"
-namespace Rock {
+namespace Rock
+{
/// \ingroup Rock, FileSystems
class StoreFileSystem: public ::StoreFileSystem
StoreSearch *
Rock::SwapDir::search(String const url, HttpRequest *)
{
- assert(false); return NULL; // XXX: implement
+ assert(false);
+ return NULL; // XXX: implement
}
// called when Squid core needs a StoreEntry with a given key
#endif
if (res != 0) {
debugs(47, DBG_CRITICAL, "Failed to create Rock db dir " << path <<
- ": " << xstrerror());
+ ": " << xstrerror());
fatal("Rock Store db creation error");
- }
- }
+ }
+ }
#if SLOWLY_FILL_WITH_ZEROS
/* TODO just set the file size */
for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
if (write(swap, block, sizeof(block)) != sizeof(block)) {
debugs(47,0, "Failed to create Rock Store db in " << filePath <<
- ": " << xstrerror());
+ ": " << xstrerror());
fatal("Rock Store db creation error");
- }
- }
+ }
+ }
close(swap);
#else
const int swap = open(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
if (swap < 0) {
debugs(47,0, "Failed to initialize Rock Store db in " << filePath <<
- "; create error: " << xstrerror());
+ "; create error: " << xstrerror());
fatal("Rock Store db creation error");
}
if (ftruncate(swap, maxSize()) != 0) {
debugs(47,0, "Failed to initialize Rock Store db in " << filePath <<
- "; truncate error: " << xstrerror());
+ "; truncate error: " << xstrerror());
fatal("Rock Store db creation error");
}
memset(header, '\0', sizeof(header));
if (write(swap, header, sizeof(header)) != sizeof(header)) {
debugs(47,0, "Failed to initialize Rock Store db in " << filePath <<
- "; write error: " << xstrerror());
+ "; write error: " << xstrerror());
fatal("Rock Store db initialization error");
}
close(swap);
debugs(47, 0, "\tusable db size: " << diskOffsetLimit() << " bytes");
debugs(47, 0, "\tdisk space waste: " << totalWaste << " bytes");
debugs(47, 0, "WARNING: Rock store config wastes space.");
- }
+ }
*/
}
void
-Rock::SwapDir::rebuild() {
+Rock::SwapDir::rebuild()
+{
//++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init()
AsyncJob::Start(new Rebuild(this));
}
Rock::SwapDir::addEntry(const int filen, const DbCellHeader &header, const StoreEntry &from)
{
debugs(47, 8, HERE << &from << ' ' << from.getMD5Text() <<
- ", filen="<< std::setfill('0') << std::hex << std::uppercase <<
- std::setw(8) << filen);
+ ", filen="<< std::setfill('0') << std::hex << std::uppercase <<
+ std::setw(8) << filen);
sfileno newLocation = 0;
if (Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(from.key), newLocation)) {
sio->diskOffset = diskOffset(sio->swap_filen);
debugs(47,5, HERE << "dir " << index << " created new filen " <<
- std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
- sio->swap_filen << std::dec << " at " << sio->diskOffset);
+ std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
+ sio->swap_filen << std::dec << " at " << sio->diskOffset);
assert(sio->diskOffset + payloadEnd <= diskOffsetLimit());
return NULL;
}
- if (e.swap_filen < 0) {
+ if (e.swap_filen < 0) {
debugs(47,4, HERE << e);
return NULL;
}
assert(sio->payloadEnd <= max_objsize); // the payload fits the slot
debugs(47,5, HERE << "dir " << index << " has old filen: " <<
- std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
- sio->swap_filen);
+ std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
+ sio->swap_filen);
assert(slot->basics.swap_file_sz > 0);
assert(slot->basics.swap_file_sz == e.swap_file_sz);
xstrerror());
debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
- std::setw(12) << maxSize() << " disk bytes and " <<
- std::setw(7) << map->entryLimit() << " entries");
+ std::setw(12) << maxSize() << " disk bytes and " <<
+ std::setw(7) << map->entryLimit() << " entries");
rebuild();
}
// storeSwapOutFileClosed calls this method on DISK_NO_SPACE_LEFT,
// but it should not happen for us
void
-Rock::SwapDir::diskFull() {
+Rock::SwapDir::diskFull()
+{
debugs(20, DBG_IMPORTANT, "Internal ERROR: No space left with " <<
- "rock cache_dir: " << filePath);
+ "rock cache_dir: " << filePath);
}
/// purge while full(); it should be sufficient to purge just one
void
Rock::SwapDir::maintain()
{
- debugs(47,3, HERE << "cache_dir[" << index << "] guards: " <<
- !repl << !map << !full() << StoreController::store_dirs_rebuilding);
+ debugs(47,3, HERE << "cache_dir[" << index << "] guards: " <<
+ !repl << !map << !full() << StoreController::store_dirs_rebuilding);
if (!repl)
return; // no means (cannot find a victim)
for (; freed < maxFreed && full(); ++freed) {
if (StoreEntry *e = walker->Next(walker))
e->release(); // will call our unlink() method
- else
+ else
break; // no more objects
- }
+ }
debugs(47,2, HERE << "Rock cache_dir[" << index << "] freed " << freed <<
- " scanned " << walker->scanned << '/' << walker->locked);
+ " scanned " << walker->scanned << '/' << walker->locked);
walker->Done(walker);
if (full()) {
debugs(47,0, "ERROR: Rock cache_dir[" << index << "] " <<
- "is still full after freeing " << freed << " entries. A bug?");
- }
+ "is still full after freeing " << freed << " entries. A bug?");
+ }
}
void
if (limit > 0) {
const int entryCount = map->entryCount();
storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
- entryCount, (100.0 * entryCount / limit));
+ entryCount, (100.0 * entryCount / limit));
if (limit < 100) { // XXX: otherwise too expensive to count
Ipc::ReadWriteLockStats stats;
stats.dump(e);
}
}
- }
+ }
storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
- store_open_disk_fd, Config.max_open_disk_fds);
+ store_open_disk_fd, Config.max_open_disk_fds);
storeAppendPrintf(&e, "Flags:");
class ReadRequest;
class WriteRequest;
-namespace Rock {
+namespace Rock
+{
class Rebuild;
StoreEntry tmpe;
const bool loaded = storeRebuildParseEntry(buf, tmpe, key, counts,
- (int64_t)sb.st_size);
+ (int64_t)sb.st_size);
file_close(fd);
store_open_disk_fd--;
/// Supplies atomic operations for an integral Value in memory shared by kids.
/// Used to implement non-blocking shared locks, queues, tables, and pools.
template <class Value>
-class AtomicWordT {
+class AtomicWordT
+{
public:
AtomicWordT() {} // leave value unchanged
AtomicWordT(Value aValue): value(aValue) {} // XXX: unsafe
enum { AtomicOperationsSupported = 1 };
#else
-/// A wrapper to provide AtomicWordT API (and asserting implementation)
+/// A wrapper to provide AtomicWordT API (and asserting implementation)
/// where we do not support atomic operations. This avoids ifdefs in core code.
template <class Value>
-class AtomicWordT {
+class AtomicWordT
+{
public:
AtomicWordT() {} // leave value unchanged
AtomicWordT(Value aValue): value(aValue) {} // XXX: unsafe
Value operator --(int) { assert(false); return *this; }
bool swap_if(const int comparand, const int replacement)
- { assert(false); return false; }
+ { assert(false); return false; }
/// v1 = value; value &= v2; return v1;
Value fetchAndAnd(const Value v2)
- { assert(false); return value; }
+ { assert(false); return value; }
// TODO: no need for __sync_bool_compare_and_swap here?
bool operator ==(int v2) { assert(false); return false; }
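// For comparison, a hedged sketch of the enabled counterparts built on the
// GCC builtin named in the TODO above (the real methods may differ):
//   bool swap_if(const int comparand, const int replacement)
//       { return __sync_bool_compare_and_swap(&value, comparand, replacement); }
//   Value fetchAndAnd(const Value v2)
//       { return __sync_fetch_and_and(&value, v2); }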
searchers.push_back(request);
debugs(54, 3, HERE << "cannot yet tell kid" << request.requestorId <<
- " who " << request.tag << " is");
+ " who " << request.tag << " is");
}
void
const StrandCoord& strand)
{
debugs(54, 3, HERE << "tell kid" << request.requestorId << " that " <<
- request.tag << " is kid" << strand.kidId);
+ request.tag << " is kid" << strand.kidId);
const StrandSearchResponse response(strand);
TypedMsgHdr message;
response.pack(message);
// TODO: processes may not be kids; is there a better place to put this?
-/// process kinds
+/// process kinds
typedef enum {
pkOther = 0, ///< we do not know or do not care
pkCoordinator = 1, ///< manages all other kids
// OneToOneUniQueue
Ipc::OneToOneUniQueue::OneToOneUniQueue(const unsigned int aMaxItemSize, const int aCapacity):
- theIn(0), theOut(0), theSize(0), theMaxItemSize(aMaxItemSize),
- theCapacity(aCapacity)
+ theIn(0), theOut(0), theSize(0), theMaxItemSize(aMaxItemSize),
+ theCapacity(aCapacity)
{
Must(theMaxItemSize > 0);
Must(theCapacity > 0);
}
Ipc::FewToFewBiQueue::FewToFewBiQueue(const String &id, const Group aLocalGroup, const int aLocalProcessId):
- metadata(shm_old(Metadata)(MetadataId(id).termedBuf())),
- queues(shm_old(OneToOneUniQueues)(QueuesId(id).termedBuf())),
- readers(shm_old(QueueReaders)(ReadersId(id).termedBuf())),
- theLocalGroup(aLocalGroup), theLocalProcessId(aLocalProcessId),
- theLastPopProcessId(readers->theCapacity)
+ metadata(shm_old(Metadata)(MetadataId(id).termedBuf())),
+ queues(shm_old(OneToOneUniQueues)(QueuesId(id).termedBuf())),
+ readers(shm_old(QueueReaders)(ReadersId(id).termedBuf())),
+ theLocalGroup(aLocalGroup), theLocalProcessId(aLocalProcessId),
+ theLastPopProcessId(readers->theCapacity)
{
Must(queues->theCapacity == metadata->theGroupASize * metadata->theGroupBSize * 2);
Must(readers->theCapacity == metadata->theGroupASize + metadata->theGroupBSize);
switch (group) {
case groupA:
return metadata->theGroupAIdOffset <= processId &&
- processId < metadata->theGroupAIdOffset + metadata->theGroupASize;
+ processId < metadata->theGroupAIdOffset + metadata->theGroupASize;
case groupB:
return metadata->theGroupBIdOffset <= processId &&
- processId < metadata->theGroupBIdOffset + metadata->theGroupBSize;
+ processId < metadata->theGroupBIdOffset + metadata->theGroupBSize;
}
return false;
}
{
Must(validProcessId(group, processId));
const int index = group == groupA ?
- processId - metadata->theGroupAIdOffset :
- metadata->theGroupASize + processId - metadata->theGroupBIdOffset;
+ processId - metadata->theGroupAIdOffset :
+ metadata->theGroupASize + processId - metadata->theGroupBIdOffset;
return readers->theReaders[index];
}
}
Ipc::FewToFewBiQueue::Metadata::Metadata(const int aGroupASize, const int aGroupAIdOffset, const int aGroupBSize, const int aGroupBIdOffset):
- theGroupASize(aGroupASize), theGroupAIdOffset(aGroupAIdOffset),
- theGroupBSize(aGroupBSize), theGroupBIdOffset(aGroupBIdOffset)
+ theGroupASize(aGroupASize), theGroupAIdOffset(aGroupAIdOffset),
+ theGroupBSize(aGroupBSize), theGroupBIdOffset(aGroupBIdOffset)
{
Must(theGroupASize > 0);
Must(theGroupBSize > 0);
}
Ipc::FewToFewBiQueue::Owner::Owner(const String &id, const int groupASize, const int groupAIdOffset, const int groupBSize, const int groupBIdOffset, const unsigned int maxItemSize, const int capacity):
- metadataOwner(shm_new(Metadata)(MetadataId(id).termedBuf(), groupASize, groupAIdOffset, groupBSize, groupBIdOffset)),
- queuesOwner(shm_new(OneToOneUniQueues)(QueuesId(id).termedBuf(), groupASize*groupBSize*2, maxItemSize, capacity)),
- readersOwner(shm_new(QueueReaders)(ReadersId(id).termedBuf(), groupASize+groupBSize))
+ metadataOwner(shm_new(Metadata)(MetadataId(id).termedBuf(), groupASize, groupAIdOffset, groupBSize, groupBIdOffset)),
+ queuesOwner(shm_new(OneToOneUniQueues)(QueuesId(id).termedBuf(), groupASize*groupBSize*2, maxItemSize, capacity)),
+ readersOwner(shm_new(QueueReaders)(ReadersId(id).termedBuf(), groupASize+groupBSize))
{
}
class String;
-namespace Ipc {
+namespace Ipc
+{
/// State of the reading end of a queue (i.e., of the code calling pop()).
/// Multiple queues attached to one reader share this state.
-class QueueReader {
+class QueueReader
+{
public:
QueueReader(); // the initial state is "blocked without a signal"
};
/// shared array of QueueReaders
-class QueueReaders {
+class QueueReaders
+{
public:
QueueReaders(const int aCapacity);
size_t sharedMemorySize() const;
* queue is full, the writer will just not push and come back later (with a
* different value). We can add support for blocked writers if needed.
*/
-class OneToOneUniQueue {
+class OneToOneUniQueue
+{
public:
// pop() and push() exceptions; TODO: use TextException instead
class Full {};
};
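// A hedged usage sketch of the non-blocking writer described above (assuming
// push() throws Full when the queue has no room):
//   try {
//       queue.push(item);
//   } catch (const Ipc::OneToOneUniQueue::Full &) {
//       // come back later with the same or a different value
//   }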
/// shared array of OneToOneUniQueues
-class OneToOneUniQueues {
+class OneToOneUniQueues
+{
public:
OneToOneUniQueues(const int aCapacity, const unsigned int maxItemSize, const int queueCapacity);
* communicate. Process in each group has a unique integer ID in
* [groupIdOffset, groupIdOffset + groupSize) range.
*/
-class FewToFewBiQueue {
+class FewToFewBiQueue
+{
public:
typedef OneToOneUniQueue::Full Full;
typedef OneToOneUniQueue::ItemTooLarge ItemTooLarge;
};
public:
- class Owner {
+ class Owner
+ {
public:
Owner(const String &id, const int groupASize, const int groupAIdOffset, const int groupBSize, const int groupBIdOffset, const unsigned int maxItemSize, const int capacity);
~Owner();
{
memset(this, 0, sizeof(*this));
}
-
+
void
Ipc::ReadWriteLockStats::dump(StoreEntry &e) const
{
return;
storeAppendPrintf(&e, "Reading: %9d %6.2f%%\n",
- readable, (100.0 * readable / count));
+ readable, (100.0 * readable / count));
storeAppendPrintf(&e, "Writing: %9d %6.2f%%\n",
- writeable, (100.0 * writeable / count));
+ writeable, (100.0 * writeable / count));
storeAppendPrintf(&e, "Idle: %9d %6.2f%%\n",
- idle, (100.0 * idle / count));
+ idle, (100.0 * idle / count));
if (readers || writers) {
const int locked = readers + writers;
storeAppendPrintf(&e, "Readers: %9d %6.2f%%\n",
- readers, (100.0 * readers / locked));
+ readers, (100.0 * readers / locked));
storeAppendPrintf(&e, "Writers: %9d %6.2f%%\n",
- writers, (100.0 * writers / locked));
+ writers, (100.0 * writers / locked));
}
}
class StoreEntry;
-namespace Ipc {
+namespace Ipc
+{
class ReadWriteLockStats;
/// an atomic readers-writer or shared-exclusive lock suitable for maps/tables
-class ReadWriteLock {
+class ReadWriteLock
+{
public:
// default constructor is OK because of shared memory zero-initialization
/// approximate stats of a set of ReadWriteLocks
-class ReadWriteLockStats {
+class ReadWriteLockStats
+{
public:
ReadWriteLockStats();
}
Ipc::StoreMap::StoreMap(const char *const aPath): cleaner(NULL), path(aPath),
- shared(shm_old(Shared)(aPath))
+ shared(shm_old(Shared)(aPath))
{
assert(shared->limit > 0); // we should not be created otherwise
debugs(54, 5, HERE << "attached map [" << path << "] created: " <<
Ipc::StoreMap::free(const sfileno fileno)
{
debugs(54, 5, HERE << " marking slot at " << fileno << " to be freed in"
- " map [" << path << ']');
+ " map [" << path << ']');
assert(valid(fileno));
Slot &s = shared->slots[fileno];
/* Ipc::StoreMap::Shared */
Ipc::StoreMap::Shared::Shared(const int aLimit, const size_t anExtrasSize):
- limit(aLimit), extrasSize(anExtrasSize), count(0)
+ limit(aLimit), extrasSize(anExtrasSize), count(0)
{
}
#include "ipc/mem/Pointer.h"
#include "typedefs.h"
-namespace Ipc {
+namespace Ipc
+{
/// a StoreMap element, holding basic shareable StoreEntry info
-class StoreMapSlot {
+class StoreMapSlot
+{
public:
StoreMapSlot();
uint64_t swap_file_sz;
uint16_t refcount;
uint16_t flags;
- } basics;
+ } basics;
/// possible persistent states
typedef enum {
Empty, ///< ready for writing, with nothing of value
Writeable, ///< transitions from Empty to Readable
Readable, ///< ready for reading
- } State;
+ } State;
State state; ///< current state
};
class StoreMapCleaner;
-/// map of StoreMapSlots indexed by their keys, with read/write slot locking
+/// map of StoreMapSlots indexed by their keys, with read/write slot locking
/// kids extend to store custom data
class StoreMap
{
typedef StoreMapSlot Slot;
private:
- struct Shared
- {
+ struct Shared {
Shared(const int aLimit, const size_t anExtrasSize);
size_t sharedMemorySize() const;
static size_t SharedMemorySize(const int limit, const size_t anExtrasSize);
int slotIndexByKey(const cache_key *const key) const;
Slot &slotByKey(const cache_key *const key);
- Slot *openForReading(Slot &s);
+ Slot *openForReading(Slot &s);
void abortWriting(const sfileno fileno);
void freeIfNeeded(Slot &s);
void freeLocked(Slot &s, bool keepLocked);
template <class ExtrasT>
StoreMapWithExtras<ExtrasT>::StoreMapWithExtras(const char *const path):
- StoreMap(path)
+ StoreMap(path)
{
const size_t sharedSizeWithoutExtras =
Shared::SharedMemorySize(entryLimit(), 0);
Ipc::HereIamMessage::HereIamMessage(const StrandCoord &aStrand):
- strand(aStrand)
+ strand(aStrand)
{
}
}
Ipc::StrandSearchRequest::StrandSearchRequest(const TypedMsgHdr &hdrMsg):
- requestorId(-1)
+ requestorId(-1)
{
hdrMsg.checkType(mtStrandSearchRequest);
hdrMsg.getPod(requestorId);
/* StrandSearchResponse */
Ipc::StrandSearchResponse::StrandSearchResponse(const Ipc::StrandCoord &aStrand):
- strand(aStrand)
+ strand(aStrand)
{
}
#include <iosfwd>
#endif
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
/// Shared memory page identifier, address, or handler
-class PageId {
+class PageId
+{
public:
PageId(): pool(0), number(0), purpose(maxPurpose) {}
}
Ipc::Mem::PagePool::PagePool(const char *const id):
- pageIndex(shm_old(PageStack)(id)),
- theLevels(reinterpret_cast<AtomicWord *>(
- reinterpret_cast<char *>(pageIndex.getRaw()) +
- pageIndex->stackSize())),
- theBuf(reinterpret_cast<char *>(theLevels + PageId::maxPurpose))
+ pageIndex(shm_old(PageStack)(id)),
+ theLevels(reinterpret_cast<AtomicWord *>(
+ reinterpret_cast<char *>(pageIndex.getRaw()) +
+ pageIndex->stackSize())),
+ theBuf(reinterpret_cast<char *>(theLevels + PageId::maxPurpose))
{
}
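// Shared segment layout implied by the pointer arithmetic above:
//   [ PageStack index | one usage level per PageId purpose | page buffer ]
//     pageIndex         theLevels[0..maxPurpose)             theBuf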
#include "ipc/mem/PageStack.h"
#include "ipc/mem/Pointer.h"
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
/// Atomic container of shared memory pages. Implemented using a collection of
/// Segments, each with a PageStack index of free pages. All pools must be
/// created by a single process.
-class PagePool {
+class PagePool
+{
public:
typedef Ipc::Mem::Owner<PageStack> Owner;
Ipc::Mem::PageStack::PageStack(const uint32_t aPoolId, const unsigned int aCapacity, const size_t aPageSize):
- thePoolId(aPoolId), theCapacity(aCapacity), thePageSize(aPageSize),
- theSize(theCapacity),
- theLastReadable(prev(theSize)), theFirstWritable(next(theLastReadable))
+ thePoolId(aPoolId), theCapacity(aCapacity), thePageSize(aPageSize),
+ theSize(theCapacity),
+ theLastReadable(prev(theSize)), theFirstWritable(next(theLastReadable))
{
// initially, all pages are free
for (Offset i = 0; i < theSize; ++i)
Ipc::Mem::PageStack::pageIdIsValid(const PageId &page) const
{
return page.pool == thePoolId && page.number != Writable &&
- page.number <= capacity();
+ page.number <= capacity();
}
size_t
#include "ipc/AtomicWord.h"
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
class PageId;
/// Atomic container of "free" page numbers inside a single SharedMemory space.
/// Assumptions: all page numbers are unique, positive, have a known maximum,
/// and can be temporarily unavailable as long as they are never truly lost.
-class PageStack {
+class PageStack
+{
public:
typedef uint32_t Value; ///< stack item type (a free page number)
// TODO: make configurable to avoid waste when mem-cached objects are small/big
size_t
-Ipc::Mem::PageSize() {
+Ipc::Mem::PageSize()
+{
return 32*1024;
}
Ipc::Mem::GetPage(const PageId::Purpose purpose, PageId &page)
{
return ThePagePool && PagesAvailable(purpose) > 0 ?
- ThePagePool->get(purpose, page) : false;
+ ThePagePool->get(purpose, page) : false;
}
void
#include "ipc/mem/Page.h"
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
/* Single page manipulation */
#include "ipc/mem/Segment.h"
#include "RefCount.h"
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
/// allocates/deallocates shared memory; creates and later destroys a
/// Class object using that memory
template <class Class>
Owner<Class>::Owner(const char *const id, const off_t sharedSize):
- theSegment(id), theObject(NULL)
+ theSegment(id), theObject(NULL)
{
theSegment.create(sharedSize);
Must(theSegment.mem());
#include <unistd.h>
Ipc::Mem::Segment::Segment(const char *const id):
- theName(GenerateName(id)), theFD(-1), theMem(NULL),
- theSize(0), theReserved(0), doUnlink(false)
+ theName(GenerateName(id)), theFD(-1), theMem(NULL),
+ theSize(0), theReserved(0), doUnlink(false)
{
}
-Ipc::Mem::Segment::~Segment() {
+Ipc::Mem::Segment::~Segment()
+{
if (theFD >= 0) {
detach();
if (close(theFD) != 0)
}
bool
-Ipc::Mem::Segment::Enabled() {
+Ipc::Mem::Segment::Enabled()
+{
#if HAVE_SHM
return true;
#else
#include "SquidString.h"
-namespace Ipc {
+namespace Ipc
+{
-namespace Mem {
+namespace Mem
+{
/// POSIX shared memory segment
-class Segment {
+class Segment
+{
public:
/// Create a shared memory segment.
Segment(const char *const id);
return SquidMain(argc, argv);
} catch (const std::exception &e) {
debugs(1, DBG_CRITICAL, "FATAL: dying from an unhandled exception: " <<
- e.what());
+ e.what());
throw;
} catch (...) {
debugs(1, DBG_CRITICAL, "FATAL: dying from an unhandled exception.");
xstrncpy(TheKidName, processName + 1, nameLen + 1);
if (!strcmp(TheKidName, "squid-coord"))
TheProcessKind = pkCoordinator;
- else
- if (!strcmp(TheKidName, "squid"))
+ else if (!strcmp(TheKidName, "squid"))
TheProcessKind = pkWorker;
- else
- if (!strcmp(TheKidName, "squid-disk"))
+ else if (!strcmp(TheKidName, "squid-disk"))
TheProcessKind = pkDisker;
else
TheProcessKind = pkOther;
kid.start(pid);
syslog(LOG_NOTICE, "Squid Parent: %s process %d started",
- kid.name().termedBuf(), pid);
+ kid.name().termedBuf(), pid);
}
/* parent */
stats.n_disk_objects = Store::Root().currentCount();
stats.objects_size = stats.n_disk_objects > 0 ?
- stats.store_swap_size / stats.n_disk_objects : 0.0;
+ stats.store_swap_size / stats.n_disk_objects : 0.0;
stats.unlink_requests = statCounter.unlink.requests;
assert(storePendingNClients(this) == 0);
- if (EBIT_TEST(flags, RELEASE_REQUEST))
- {
+ if (EBIT_TEST(flags, RELEASE_REQUEST)) {
this->release();
return 0;
}
const int64_t expectedSize = mem_obj->expectedReplySize();
// objects of unknown size are not allowed into memory cache, for now
if (expectedSize < 0 ||
- expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
+ expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
return 0;
}
assert (isEmpty());
assert(mem_obj);
-
+
const HttpReply *rep = getReply();
assert(rep);
debugs(20, 7, "storeSwapOut: expectedEnd = " << expectedEnd);
if (expectedEnd > store_maxobjsize) {
debugs(20, 3, "storeSwapOut: will not fit: " << expectedEnd <<
- " > " << store_maxobjsize);
+ " > " << store_maxobjsize);
decision = MemObject::SwapOut::swImpossible;
return false; // known to outgrow the limit eventually
}
const int64_t currentEnd = mem_obj->endOffset();
if (currentEnd > store_maxobjsize) {
debugs(20, 3, "storeSwapOut: does not fit: " << currentEnd <<
- " > " << store_maxobjsize);
+ " > " << store_maxobjsize);
decision = MemObject::SwapOut::swImpossible;
return false; // already does not fit and may only get bigger
}
// prevent default swPossible answer for yet unknown length
if (expectedEnd < 0) {
debugs(20, 3, "storeSwapOut: wait for more info: " <<
- store_maxobjsize);
+ store_maxobjsize);
return false; // may fit later, but will be rejected now
}
}
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
- e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
- e.swap_status;
+ e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
+ e.swap_status;
}
/* NullStoreEntry */
int StoreController::store_dirs_rebuilding = 1;
StoreController::StoreController() : swapDir (new StoreHashIndex())
- , memStore(NULL)
+ , memStore(NULL)
{}
StoreController::~StoreController()
if (StoreEntry *e = sd->get(key)) {
debugs(20, 3, HERE << "cache_dir " << idx <<
- " got cached entry: " << *e);
+ " got cached entry: " << *e);
return e;
}
}
}
debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
- " cache_dirs have " << storeKeyText(key));
+ " cache_dirs have " << storeKeyText(key));
return NULL;
}
// leave keepInLocalMemory false; memStore maintains its own cache
} else {
keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
- // the local memory cache is not overflowing
- (mem_node::InUseCount() <= store_pages_max);
+ // the local memory cache is not overflowing
+ (mem_node::InUseCount() <= store_pages_max);
}
// An idle, unlocked entry that belongs to a SwapDir which controls
store_hash_buckets = storeKeyHashBuckets(buckets);
debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
- (Config.memShared ? " [shared]" : ""));
+ (Config.memShared ? " [shared]" : ""));
debugs(20, 1, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
store_table = hash_create(storeKeyHashCmp,
if (len < 0) {
const int xerrno = errno;
debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << diskIndex << "]: " <<
- "Ignoring cached entry after meta data read failure: " << xstrerr(xerrno));
+ "Ignoring cached entry after meta data read failure: " << xstrerr(xerrno));
return false;
}
"SIZE MISMATCH " << tmpe.swap_file_sz << "!=" << expectedSize);
return false;
}
- } else
- if (tmpe.swap_file_sz <= 0) {
+ } else if (tmpe.swap_file_sz <= 0) {
debugs(47, DBG_IMPORTANT, "WARNING: Ignoring cache entry with " <<
"unknown size: " << tmpe);
return false;
if (store_status != STORE_OK) {
const int64_t expectedSize = mem_obj->expectedReplySize();
const int64_t maxKnownSize = expectedSize < 0 ?
- swapout_maxsize : expectedSize;
+ swapout_maxsize : expectedSize;
debugs(20, 7, HERE << "storeSwapOut: maxKnownSize= " << maxKnownSize);
if (maxKnownSize < store_maxobjsize) {
* Should we add an option to limit this memory consumption?
*/
debugs(20, 5, "storeSwapOut: Deferring swapout start for " <<
- (store_maxobjsize - maxKnownSize) << " bytes");
+ (store_maxobjsize - maxKnownSize) << " bytes");
return;
- }
+ }
}
// TODO: it is better to trim as soon as we swap something out, not before
/// Used for boolean enabled/disabled options with complex default logic.
/// Allows Squid to compute the right default after configuration.
/// Checks that not-yet-defined option values are not used.
-class YesNoNone {
+class YesNoNone
+{
// TODO: generalize to non-boolean option types
public:
YesNoNone(): option(0) {}
/// returns true iff enabled; asserts if the option has not been configured
operator void *() const; // TODO: use a fancy/safer version of the operator
- /// enables or disables the option;
+ /// enables or disables the option;
void configure(bool beSet);
/// whether the option was enabled or disabled, by user or Squid
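// A hedged usage sketch (the pattern appears in the memory-cache code in this
// patch): compute the default after configuration parsing, then read freely:
//   if (!Config.memShared.configured())
//       Config.memShared.configure(computedDefault); // computedDefault is ours
//   if (Config.memShared) ... // would assert if still unconfigured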
bool
storeRebuildKeepEntry(const StoreEntry &tmpe, const cache_key *key,
- struct _store_rebuild_data &counts)
+ struct _store_rebuild_data &counts)
{
- return false;
+ return false;
}
bool