From: Alex Rousskov Date: Tue, 6 Sep 2011 22:32:30 +0000 (-0600) Subject: SourceFormat Enforcement X-Git-Tag: take08~22 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9199139f0eeb83e0c6ac3832b2af849cc3df7fa6;p=thirdparty%2Fsquid.git SourceFormat Enforcement --- diff --git a/compat/shm.h b/compat/shm.h index d1b66bd426..c9135d5969 100644 --- a/compat/shm.h +++ b/compat/shm.h @@ -26,17 +26,17 @@ extern "C" { -inline int -shm_open(const char *, int, mode_t) { - errno = ENOTSUP; - return -1; -} - -inline int -shm_unlink(const char *) { - errno = ENOTSUP; - return -1; -} + inline int + shm_open(const char *, int, mode_t) { + errno = ENOTSUP; + return -1; + } + + inline int + shm_unlink(const char *) { + errno = ENOTSUP; + return -1; + } } /* extern "C" */ diff --git a/src/DiskIO/IpcIo/DiskIOIpcIo.cc b/src/DiskIO/IpcIo/DiskIOIpcIo.cc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/DiskIO/IpcIo/IpcIoFile.cc b/src/DiskIO/IpcIo/IpcIoFile.cc index c118ff18a8..3d50a70b9a 100644 --- a/src/DiskIO/IpcIo/IpcIoFile.cc +++ b/src/DiskIO/IpcIo/IpcIoFile.cc @@ -37,7 +37,7 @@ static void DiskerClose(const String &path); /// IpcIo wrapper for debugs() streams; XXX: find a better class name struct SipcIo { SipcIo(int aWorker, const IpcIoMsg &aMsg, int aDisker): - worker(aWorker), msg(aMsg), disker(aDisker) {} + worker(aWorker), msg(aMsg), disker(aDisker) {} int worker; const IpcIoMsg &msg; @@ -48,14 +48,14 @@ std::ostream & operator <<(std::ostream &os, const SipcIo &sio) { return os << "ipcIo" << sio.worker << '.' << sio.msg.requestId << - (sio.msg.command == IpcIo::cmdRead ? 'r' : 'w') << sio.disker; + (sio.msg.command == IpcIo::cmdRead ? 'r' : 'w') << sio.disker; } IpcIoFile::IpcIoFile(char const *aDb): - dbName(aDb), diskId(-1), error_(false), lastRequestId(0), - olderRequests(&requestMap1), newerRequests(&requestMap2), - timeoutCheckScheduled(false) + dbName(aDb), diskId(-1), error_(false), lastRequestId(0), + olderRequests(&requestMap1), newerRequests(&requestMap2), + timeoutCheckScheduled(false) { } @@ -114,7 +114,8 @@ IpcIoFile::open(int flags, mode_t mode, RefCount callback) } void -IpcIoFile::openCompleted(const Ipc::StrandSearchResponse *const response) { +IpcIoFile::openCompleted(const Ipc::StrandSearchResponse *const response) +{ Must(diskId < 0); // we do not know our disker yet if (!response) { @@ -181,7 +182,7 @@ void IpcIoFile::read(ReadRequest *readRequest) { debugs(79,3, HERE << "(disker" << diskId << ", " << readRequest->len << ", " << - readRequest->offset << ")"); + readRequest->offset << ")"); assert(ioRequestor != NULL); assert(readRequest->len >= 0); @@ -208,9 +209,7 @@ IpcIoFile::readCompleted(ReadRequest *readRequest, if (response->xerrno) { debugs(79,1, HERE << "error: " << xstrerr(response->xerrno)); ioError = error_ = true; - } - else - if (!response->page) { + } else if (!response->page) { debugs(79,1, HERE << "error: run out of shared memory pages"); ioError = true; } else { @@ -222,7 +221,7 @@ IpcIoFile::readCompleted(ReadRequest *readRequest, } const ssize_t rlen = ioError ? -1 : (ssize_t)readRequest->len; - const int errflag = ioError ? DISK_ERROR : DISK_OK; + const int errflag = ioError ? 
DISK_ERROR :DISK_OK; ioRequestor->readCompleted(readRequest->buf, rlen, errflag, readRequest); } @@ -230,7 +229,7 @@ void IpcIoFile::write(WriteRequest *writeRequest) { debugs(79,3, HERE << "(disker" << diskId << ", " << writeRequest->len << ", " << - writeRequest->offset << ")"); + writeRequest->offset << ")"); assert(ioRequestor != NULL); assert(writeRequest->len >= 0); @@ -254,12 +253,10 @@ IpcIoFile::writeCompleted(WriteRequest *writeRequest, if (!response) { debugs(79, 3, HERE << "error: timeout"); ioError = true; // I/O timeout does not warrant setting error_? - } else - if (response->xerrno) { + } else if (response->xerrno) { debugs(79,1, HERE << "error: " << xstrerr(response->xerrno)); ioError = error_ = true; - } else - if (response->len != writeRequest->len) { + } else if (response->len != writeRequest->len) { debugs(79,1, HERE << "problem: " << response->len << " < " << writeRequest->len); error_ = true; } @@ -269,11 +266,11 @@ IpcIoFile::writeCompleted(WriteRequest *writeRequest, if (!ioError) { debugs(79,5, HERE << "wrote " << writeRequest->len << " to disker" << - diskId << " at " << writeRequest->offset); - } + diskId << " at " << writeRequest->offset); + } const ssize_t rlen = ioError ? 0 : (ssize_t)writeRequest->len; - const int errflag = ioError ? DISK_ERROR : DISK_OK; + const int errflag = ioError ? DISK_ERROR :DISK_OK; ioRequestor->writeCompleted(errflag, rlen, writeRequest); } @@ -334,7 +331,7 @@ IpcIoFile::push(IpcIoPendingRequest *const pending) debugs(47, DBG_IMPORTANT, "Worker I/O push queue overflow: " << SipcIo(KidIdentifier, ipcIo, diskId)); // TODO: report queue len // TODO: grow queue size - + pending->completeIo(NULL); delete pending; } catch (const TextException &e) { @@ -346,7 +343,8 @@ IpcIoFile::push(IpcIoPendingRequest *const pending) /// whether we think there is enough time to complete the I/O bool -IpcIoFile::canWait() const { +IpcIoFile::canWait() const +{ if (!Config.Timeout.disk_io) return true; // no timeout specified @@ -356,7 +354,7 @@ IpcIoFile::canWait() const { const int expectedWait = tvSubMsec(oldestIo.start, current_time); if (expectedWait < 0 || - static_cast(expectedWait) < Config.Timeout.disk_io) + static_cast(expectedWait) < Config.Timeout.disk_io) return true; // expected wait time is acceptible debugs(47,2, HERE << "cannot wait: " << expectedWait << @@ -370,7 +368,7 @@ IpcIoFile::HandleOpenResponse(const Ipc::StrandSearchResponse &response) { debugs(47, 7, HERE << "coordinator response to open request"); for (IpcIoFileList::iterator i = WaitingForOpen.begin(); - i != WaitingForOpen.end(); ++i) { + i != WaitingForOpen.end(); ++i) { if (response.strand.tag == (*i)->dbName) { (*i)->openCompleted(&response); WaitingForOpen.erase(i); @@ -402,7 +400,7 @@ IpcIoFile::handleResponse(IpcIoMsg &ipcIo) { const int requestId = ipcIo.requestId; debugs(47, 7, HERE << "popped disker response: " << - SipcIo(KidIdentifier, ipcIo, diskId)); + SipcIo(KidIdentifier, ipcIo, diskId)); Must(requestId); if (IpcIoPendingRequest *const pending = dequeueRequest(requestId)) { @@ -448,7 +446,7 @@ IpcIoFile::OpenTimeout(void *const param) const IpcIoFile *const ipcIoFile = reinterpret_cast(param); for (IpcIoFileList::iterator i = WaitingForOpen.begin(); - i != WaitingForOpen.end(); ++i) { + i != WaitingForOpen.end(); ++i) { if (*i == ipcIoFile) { (*i)->openCompleted(NULL); WaitingForOpen.erase(i); @@ -558,7 +556,7 @@ IpcIoFile::getFD() const /* IpcIoMsg */ IpcIoMsg::IpcIoMsg(): - requestId(0), offset(0), len(0), command(IpcIo::cmdNone), xerrno(0) + requestId(0), 
offset(0), len(0), command(IpcIo::cmdNone), xerrno(0) { start.tv_sec = 0; } @@ -566,7 +564,7 @@ IpcIoMsg::IpcIoMsg(): /* IpcIoPendingRequest */ IpcIoPendingRequest::IpcIoPendingRequest(const IpcIoFile::Pointer &aFile): - file(aFile), readRequest(NULL), writeRequest(NULL) + file(aFile), readRequest(NULL), writeRequest(NULL) { Must(file != NULL); if (++file->lastRequestId == 0) // don't use zero value as requestId @@ -578,8 +576,7 @@ IpcIoPendingRequest::completeIo(IpcIoMsg *const response) { if (readRequest) file->readCompleted(readRequest, response); - else - if (writeRequest) + else if (writeRequest) file->writeCompleted(writeRequest, response); else { Must(!response); // only timeouts are handled here @@ -611,13 +608,13 @@ diskerRead(IpcIoMsg &ipcIo) ipcIo.xerrno = 0; const size_t len = static_cast(read); // safe because read > 0 debugs(47,8, HERE << "disker" << KidIdentifier << " read " << - (len == ipcIo.len ? "all " : "just ") << read); + (len == ipcIo.len ? "all " : "just ") << read); ipcIo.len = len; } else { ipcIo.xerrno = errno; ipcIo.len = 0; debugs(47,5, HERE << "disker" << KidIdentifier << " read error: " << - ipcIo.xerrno); + ipcIo.xerrno); } } @@ -633,7 +630,7 @@ diskerWrite(IpcIoMsg &ipcIo) ipcIo.xerrno = 0; const size_t len = static_cast(wrote); // safe because wrote > 0 debugs(47,8, HERE << "disker" << KidIdentifier << " wrote " << - (len == ipcIo.len ? "all " : "just ") << wrote); + (len == ipcIo.len ? "all " : "just ") << wrote); ipcIo.len = len; } else { ipcIo.xerrno = errno; @@ -679,12 +676,12 @@ IpcIoFile::DiskerHandleRequests() // the gap must be positive for select(2) to be given a chance const double minBreakSecs = 0.001; eventAdd("IpcIoFile::DiskerHandleMoreRequests", - &IpcIoFile::DiskerHandleMoreRequests, - NULL, minBreakSecs, 0, false); + &IpcIoFile::DiskerHandleMoreRequests, + NULL, minBreakSecs, 0, false); DiskerHandleMoreRequestsScheduled = true; } debugs(47, 3, HERE << "pausing after " << popped << " I/Os in " << - elapsedMsec << "ms; " << (elapsedMsec/popped) << "ms per I/O"); + elapsedMsec << "ms; " << (elapsedMsec/popped) << "ms per I/O"); break; } } diff --git a/src/DiskIO/IpcIo/IpcIoFile.h b/src/DiskIO/IpcIo/IpcIoFile.h index e5915c1fc7..4bf39c4243 100644 --- a/src/DiskIO/IpcIo/IpcIoFile.h +++ b/src/DiskIO/IpcIo/IpcIoFile.h @@ -11,12 +11,14 @@ #include #include -namespace Ipc { +namespace Ipc +{ class FewToFewBiQueue; } // Ipc // TODO: expand to all classes -namespace IpcIo { +namespace IpcIo +{ /// what kind of I/O the disker needs to do or have done typedef enum { cmdNone, cmdOpen, cmdRead, cmdWrite } Command; @@ -25,7 +27,8 @@ typedef enum { cmdNone, cmdOpen, cmdRead, cmdWrite } Command; /// converts DiskIO requests to IPC queue messages -class IpcIoMsg { +class IpcIoMsg +{ public: IpcIoMsg(); diff --git a/src/DiskIO/IpcIo/IpcIoIOStrategy.cc b/src/DiskIO/IpcIo/IpcIoIOStrategy.cc index c5e46c6b0d..10a165e0b6 100644 --- a/src/DiskIO/IpcIo/IpcIoIOStrategy.cc +++ b/src/DiskIO/IpcIo/IpcIoIOStrategy.cc @@ -5,8 +5,10 @@ * DEBUG: section 47 Store Directory Routines */ -#include "IpcIoIOStrategy.h" +#include "config.h" #include "IpcIoFile.h" +#include "IpcIoIOStrategy.h" + bool IpcIoIOStrategy::shedLoad() { diff --git a/src/DiskIO/Mmapped/DiskIOMmapped.cc b/src/DiskIO/Mmapped/DiskIOMmapped.cc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/DiskIO/Mmapped/MmappedFile.cc b/src/DiskIO/Mmapped/MmappedFile.cc index 665fc8fb26..9facd43ef3 100644 --- a/src/DiskIO/Mmapped/MmappedFile.cc +++ b/src/DiskIO/Mmapped/MmappedFile.cc @@ -4,16 +4,18 @@ 
* DEBUG: section 47 Store Directory Routines */ -#include "DiskIO/Mmapped/MmappedFile.h" -#include +#include "config.h" #include "DiskIO/IORequestor.h" +#include "DiskIO/Mmapped/MmappedFile.h" #include "DiskIO/ReadRequest.h" #include "DiskIO/WriteRequest.h" +#include CBDATA_CLASS_INIT(MmappedFile); // helper class to deal with mmap(2) offset alignment and other low-level specs -class Mmapping { +class Mmapping +{ public: Mmapping(int fd, size_t length, int prot, int flags, off_t offset); ~Mmapping(); @@ -51,7 +53,7 @@ MmappedFile::operator delete(void *address) } MmappedFile::MmappedFile(char const *aPath): fd(-1), - minOffset(0), maxOffset(-1), error_(false) + minOffset(0), maxOffset(-1), error_(false) { assert(aPath); path_ = xstrdup(aPath); @@ -77,7 +79,7 @@ MmappedFile::open(int flags, mode_t mode, RefCount callback) if (fd < 0) { debugs(79,3, HERE << "open error: " << xstrerror()); error_ = true; - } else { + } else { store_open_disk_fd++; debugs(79,3, HERE << "FD " << fd); @@ -85,7 +87,7 @@ MmappedFile::open(int flags, mode_t mode, RefCount callback) struct stat sb; if (fstat(fd, &sb) == 0) maxOffset = sb.st_size; // we do not expect it to change - } + } callback->ioCompletedNotification(); } @@ -107,7 +109,7 @@ void MmappedFile::doClose() file_close(fd); fd = -1; store_open_disk_fd--; - } + } } void @@ -141,7 +143,7 @@ void MmappedFile::read(ReadRequest *aRequest) { debugs(79,3, HERE << "(FD " << fd << ", " << aRequest->len << ", " << - aRequest->offset << ")"); + aRequest->offset << ")"); assert(fd >= 0); assert(ioRequestor != NULL); @@ -154,17 +156,17 @@ MmappedFile::read(ReadRequest *aRequest) assert(maxOffset < 0 || static_cast(aRequest->offset + aRequest->len) <= static_cast(maxOffset)); Mmapping mapping(fd, aRequest->len, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, - aRequest->offset); + aRequest->offset); bool done = false; - if (void *buf = mapping.map()) { + if (void *buf = mapping.map()) { memcpy(aRequest->buf, buf, aRequest->len); done = mapping.unmap(); - } + } error_ = !done; const ssize_t rlen = error_ ? -1 : (ssize_t)aRequest->len; - const int errflag = error_ ? DISK_ERROR : DISK_OK; + const int errflag = error_ ? DISK_ERROR :DISK_OK; ioRequestor->readCompleted(aRequest->buf, rlen, errflag, aRequest); } @@ -172,7 +174,7 @@ void MmappedFile::write(WriteRequest *aRequest) { debugs(79,3, HERE << "(FD " << fd << ", " << aRequest->len << ", " << - aRequest->offset << ")"); + aRequest->offset << ")"); assert(fd >= 0); assert(ioRequestor != NULL); @@ -189,8 +191,7 @@ MmappedFile::write(WriteRequest *aRequest) if (written < 0) { debugs(79,1, HERE << "error: " << xstrerr(errno)); error_ = true; - } else - if (static_cast(written) != aRequest->len) { + } else if (static_cast(written) != aRequest->len) { debugs(79,1, HERE << "problem: " << written << " < " << aRequest->len); error_ = true; } @@ -202,10 +203,10 @@ MmappedFile::write(WriteRequest *aRequest) debugs(79,5, HERE << "wrote " << aRequest->len << " to FD " << fd << " at " << aRequest->offset); } else { doClose(); - } + } const ssize_t rlen = error_ ? 0 : (ssize_t)aRequest->len; - const int errflag = error_ ? DISK_ERROR : DISK_OK; + const int errflag = error_ ? 
DISK_ERROR :DISK_OK; ioRequestor->writeCompleted(errflag, rlen, aRequest); } @@ -217,8 +218,8 @@ MmappedFile::ioInProgress() const } Mmapping::Mmapping(int aFd, size_t aLength, int aProt, int aFlags, off_t anOffset): - fd(aFd), length(aLength), prot(aProt), flags(aFlags), offset(anOffset), - delta(-1), buf(NULL) + fd(aFd), length(aLength), prot(aProt), flags(aFlags), offset(anOffset), + delta(-1), buf(NULL) { } @@ -240,7 +241,7 @@ Mmapping::map() if (buf == MAP_FAILED) { const int errNo = errno; debugs(79,3, HERE << "error FD " << fd << "mmap(" << length << '+' << - delta << ", " << offset << '-' << delta << "): " << xstrerr(errNo)); + delta << ", " << offset << '-' << delta << "): " << xstrerr(errNo)); buf = NULL; return NULL; } @@ -252,7 +253,7 @@ bool Mmapping::unmap() { debugs(79,9, HERE << "FD " << fd << - " munmap(" << buf << ", " << length << '+' << delta << ')'); + " munmap(" << buf << ", " << length << '+' << delta << ')'); if (!buf) // forgot or failed to map return false; @@ -261,8 +262,8 @@ Mmapping::unmap() if (error) { const int errNo = errno; debugs(79,3, HERE << "error FD " << fd << - " munmap(" << buf << ", " << length << '+' << delta << "): " << - "): " << xstrerr(errNo)); + " munmap(" << buf << ", " << length << '+' << delta << "): " << + "): " << xstrerr(errNo)); } buf = NULL; return !error; diff --git a/src/DiskIO/Mmapped/MmappedIOStrategy.cc b/src/DiskIO/Mmapped/MmappedIOStrategy.cc index 53141b8d82..6f015905b6 100644 --- a/src/DiskIO/Mmapped/MmappedIOStrategy.cc +++ b/src/DiskIO/Mmapped/MmappedIOStrategy.cc @@ -5,8 +5,10 @@ * DEBUG: section 47 Store Directory Routines */ -#include "MmappedIOStrategy.h" +#include "config.h" #include "MmappedFile.h" +#include "MmappedIOStrategy.h" + bool MmappedIOStrategy::shedLoad() { diff --git a/src/MemObject.cc b/src/MemObject.cc index d46f373d70..08e8ead3cb 100644 --- a/src/MemObject.cc +++ b/src/MemObject.cc @@ -264,7 +264,8 @@ MemObject::size() const } int64_t -MemObject::expectedReplySize() const { +MemObject::expectedReplySize() const +{ debugs(20, 7, HERE << "object_sz: " << object_sz); if (object_sz >= 0) // complete() has been called; we know the exact answer return object_sz; diff --git a/src/MemStore.cc b/src/MemStore.cc index c4109ea931..70f0994667 100644 --- a/src/MemStore.cc +++ b/src/MemStore.cc @@ -28,7 +28,8 @@ MemStore::~MemStore() } void -MemStore::init() { +MemStore::init() +{ const int64_t entryLimit = EntryLimit(); if (entryLimit <= 0) return; // no memory cache configured or a misconfiguration @@ -62,15 +63,15 @@ MemStore::stat(StoreEntry &e) const storeAppendPrintf(&e, "Maximum entries: %9d\n", limit); if (limit > 0) { storeAppendPrintf(&e, "Current entries: %"PRId64" %.2f%%\n", - currentCount(), (100.0 * currentCount() / limit)); + currentCount(), (100.0 * currentCount() / limit)); if (limit < 100) { // XXX: otherwise too expensive to count Ipc::ReadWriteLockStats stats; map->updateStats(stats); stats.dump(e); - } - } - } + } + } + } } void @@ -198,7 +199,7 @@ MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras) const Ipc::Mem::PageId &page = extras.page; StoreIOBuffer sourceBuf(extras.storedSize, 0, - static_cast(PagePointer(page))); + static_cast(PagePointer(page))); // XXX: We do not know the URLs yet, only the key, but we need to parse and // store the response for the Root().get() callers to be happy because they @@ -300,7 +301,7 @@ MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras) StoreIOBuffer sharedSpace(bufSize, 0, static_cast(PagePointer(page))); - + // check that 
we kept everything or purge incomplete/sparse cached entry const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace); if (eSize != copied) { @@ -362,14 +363,12 @@ void MemStoreRr::run(const RunnerRegistry &) // decide whether to use a shared memory cache if the user did not specify if (!Config.memShared.configured()) { Config.memShared.configure(AtomicOperationsSupported && - Ipc::Mem::Segment::Enabled() && UsingSmp() && - Config.memMaxSize > 0); - } else - if (Config.memShared && !AtomicOperationsSupported) { + Ipc::Mem::Segment::Enabled() && UsingSmp() && + Config.memMaxSize > 0); + } else if (Config.memShared && !AtomicOperationsSupported) { // bail if the user wants shared memory cache but we cannot support it fatal("memory_cache_shared is on, but no support for atomic operations detected"); - } else - if (Config.memShared && !Ipc::Mem::Segment::Enabled()) { + } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) { fatal("memory_cache_shared is on, but no support for shared memory detected"); } diff --git a/src/MemStore.h b/src/MemStore.h index 7f803d7908..c896c54790 100644 --- a/src/MemStore.h +++ b/src/MemStore.h @@ -18,7 +18,8 @@ typedef Ipc::StoreMapWithExtras MemStoreMap; /// Stores HTTP entities in RAM. Current implementation uses shared memory. /// Unlike a disk store (SwapDir), operations are synchronous (and fast). -class MemStore: public Store, public Ipc::StoreMapCleaner { +class MemStore: public Store, public Ipc::StoreMapCleaner +{ public: MemStore(); virtual ~MemStore(); diff --git a/src/StoreIOState.h b/src/StoreIOState.h index f33bf4fa63..9576cfd28c 100644 --- a/src/StoreIOState.h +++ b/src/StoreIOState.h @@ -86,9 +86,9 @@ public: virtual void write(char const *buf, size_t size, off_t offset, FREE * free_func) = 0; typedef enum { - wroteAll, ///< success: caller supplied all data it wanted to swap out - writerGone, ///< failure: caller left before swapping out everything - readerDone ///< success or failure: either way, stop swapping in + wroteAll, ///< success: caller supplied all data it wanted to swap out + writerGone, ///< failure: caller left before swapping out everything + readerDone ///< success or failure: either way, stop swapping in } CloseHow; virtual void close(int how) = 0; ///< finish or abort swapping per CloseHow diff --git a/src/base/RunnersRegistry.h b/src/base/RunnersRegistry.h index f5c7ac645c..1d8adfc6b8 100644 --- a/src/base/RunnersRegistry.h +++ b/src/base/RunnersRegistry.h @@ -10,13 +10,13 @@ * squid.conf and deactivate them before exiting. * * A module in this context is code providing a functionality or service to the - * rest of Squid, such as src/DiskIO/Blocking, src/fs/ufs, or Cache Manager. A + * rest of Squid, such as src/DiskIO/Blocking, src/fs/ufs, or Cache Manager. A * module must declare a RegisteredRunner child class to implement activation and * deactivation logic using the run() method and destructor, respectively. * * This API allows the registry to determine the right [de]activation time for * each group of similar modules, without knowing any module specifics. 
- * + * */ /// well-known registries (currently, deactivation is not performed for these) @@ -26,7 +26,8 @@ typedef enum { } RunnerRegistry; /// a runnable registrant API -class RegisteredRunner { +class RegisteredRunner +{ public: // called when this runner's registry is deactivated virtual ~RegisteredRunner() {} diff --git a/src/fs/Makefile.am b/src/fs/Makefile.am index 853408c7c7..d291e006ea 100644 --- a/src/fs/Makefile.am +++ b/src/fs/Makefile.am @@ -29,9 +29,6 @@ libufs_la_SOURCES = \ ufs/ufscommon.h librock_la_SOURCES = \ - rock/RockCommon.cc \ - rock/RockCommon.h \ - rock/RockFile.cc \ rock/RockFile.h \ rock/RockIoState.cc \ rock/RockIoState.h \ diff --git a/src/fs/coss/store_dir_coss.cc b/src/fs/coss/store_dir_coss.cc index a3a426adfa..4e4ad97f6e 100644 --- a/src/fs/coss/store_dir_coss.cc +++ b/src/fs/coss/store_dir_coss.cc @@ -473,15 +473,15 @@ storeCossRebuildFromSwapLog(void *data) rb->counts.objcount++; e = rb->sd->addDiskRestore(s.key, - s.swap_filen, - s.swap_file_sz, - s.expires, - s.timestamp, - s.lastref, - s.lastmod, - s.refcount, - s.flags, - (int) rb->flags.clean); + s.swap_filen, + s.swap_file_sz, + s.expires, + s.timestamp, + s.lastref, + s.lastmod, + s.refcount, + s.flags, + (int) rb->flags.clean); storeDirSwapLog(e, SWAP_LOG_ADD); } @@ -493,15 +493,15 @@ storeCossRebuildFromSwapLog(void *data) * use to rebuild store from disk. */ StoreEntry * CossSwapDir::addDiskRestore(const cache_key *const key, - int file_number, - uint64_t swap_file_sz, - time_t expires, - time_t timestamp, - time_t lastref, - time_t lastmod, - uint32_t refcount, - uint16_t flags, - int clean) + int file_number, + uint64_t swap_file_sz, + time_t expires, + time_t timestamp, + time_t lastref, + time_t lastmod, + uint32_t refcount, + uint16_t flags, + int clean) { StoreEntry *e = NULL; debugs(47, 5, "storeCossAddDiskRestore: " << storeKeyText(key) << diff --git a/src/fs/rock/RockCommon.cc b/src/fs/rock/RockCommon.cc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/fs/rock/RockCommon.h b/src/fs/rock/RockCommon.h deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/fs/rock/RockFile.h b/src/fs/rock/RockFile.h index 097dbcbe0b..0f8dfac1eb 100644 --- a/src/fs/rock/RockFile.h +++ b/src/fs/rock/RockFile.h @@ -3,7 +3,8 @@ // XXX: rename to fs/rock/RockDbCell.{cc,h} -namespace Rock { +namespace Rock +{ /// \ingroup Rock /// meta-information at the beginning of every db cell diff --git a/src/fs/rock/RockIoRequests.cc b/src/fs/rock/RockIoRequests.cc index 0754e3e980..c4689252f1 100644 --- a/src/fs/rock/RockIoRequests.cc +++ b/src/fs/rock/RockIoRequests.cc @@ -4,21 +4,22 @@ * DEBUG: section 79 Disk IO Routines */ +#include "config.h" #include "fs/rock/RockIoRequests.h" CBDATA_NAMESPACED_CLASS_INIT(Rock, ReadRequest); CBDATA_NAMESPACED_CLASS_INIT(Rock, WriteRequest); Rock::ReadRequest::ReadRequest(const ::ReadRequest &base, - const IoState::Pointer &anSio): - ::ReadRequest(base), - sio(anSio) + const IoState::Pointer &anSio): + ::ReadRequest(base), + sio(anSio) { } Rock::WriteRequest::WriteRequest(const ::WriteRequest &base, - const IoState::Pointer &anSio): - ::WriteRequest(base), - sio(anSio) + const IoState::Pointer &anSio): + ::WriteRequest(base), + sio(anSio) { } diff --git a/src/fs/rock/RockIoRequests.h b/src/fs/rock/RockIoRequests.h index e6914e8692..0fd0e8b7c8 100644 --- a/src/fs/rock/RockIoRequests.h +++ b/src/fs/rock/RockIoRequests.h @@ -7,7 +7,8 @@ class DiskFile; -namespace Rock { +namespace Rock +{ /// \ingroup Rock class ReadRequest: public ::ReadRequest 
diff --git a/src/fs/rock/RockIoState.cc b/src/fs/rock/RockIoState.cc index dbb60f74b0..e73c6a6878 100644 --- a/src/fs/rock/RockIoState.cc +++ b/src/fs/rock/RockIoState.cc @@ -15,13 +15,13 @@ #include "fs/rock/RockSwapDir.h" Rock::IoState::IoState(SwapDir *dir, - StoreEntry *anEntry, - StoreIOState::STFNCB *cbFile, - StoreIOState::STIOCB *cbIo, - void *data): - slotSize(0), - diskOffset(-1), - payloadEnd(-1) + StoreEntry *anEntry, + StoreIOState::STFNCB *cbFile, + StoreIOState::STIOCB *cbIo, + void *data): + slotSize(0), + diskOffset(-1), + payloadEnd(-1) { e = anEntry; // swap_filen, swap_dirn, diskOffset, and payloadEnd are set by the caller @@ -58,7 +58,7 @@ Rock::IoState::read_(char *buf, size_t len, off_t coreOff, STRCB *cb, void *data // we skip our cell header; it is only read when building the map const int64_t cellOffset = sizeof(DbCellHeader) + - static_cast(coreOff); + static_cast(coreOff); assert(cellOffset <= payloadEnd); // Core specifies buffer length, but we must not exceed stored entry size @@ -71,7 +71,7 @@ Rock::IoState::read_(char *buf, size_t len, off_t coreOff, STRCB *cb, void *data read.callback_data = cbdataReference(data); theFile->read(new ReadRequest( - ::ReadRequest(buf, diskOffset + cellOffset, len), this)); + ::ReadRequest(buf, diskOffset + cellOffset, len), this)); } // We only buffer data here; we actually write when close() is called. @@ -114,15 +114,15 @@ Rock::IoState::startWriting() // to avoid triggering read-page;new_head+old_tail;write-page overheads debugs(79, 5, HERE << swap_filen << " at " << diskOffset << '+' << - theBuf.contentSize()); + theBuf.contentSize()); assert(theBuf.contentSize() <= slotSize); // theFile->write may call writeCompleted immediatelly theFile->write(new WriteRequest(::WriteRequest(theBuf.content(), - diskOffset, theBuf.contentSize(), theBuf.freeFunc()), this)); + diskOffset, theBuf.contentSize(), theBuf.freeFunc()), this)); } -// +// void Rock::IoState::finishedWriting(const int errFlag) { @@ -134,33 +134,33 @@ void Rock::IoState::close(int how) { debugs(79, 3, HERE << swap_filen << " accumulated: " << offset_ << - " how=" << how); + " how=" << how); if (how == wroteAll && !theBuf.isNull()) startWriting(); else callBack(how == writerGone ? 
DISK_ERROR : 0); // TODO: add DISK_CALLER_GONE } -/// close callback (STIOCB) dialer: breaks dependencies and +/// close callback (STIOCB) dialer: breaks dependencies and /// counts IOState concurrency level -class StoreIOStateCb: public CallDialer +class StoreIOStateCb: public CallDialer { public: StoreIOStateCb(StoreIOState::STIOCB *cb, void *data, int err, const Rock::IoState::Pointer &anSio): - callback(NULL), - callback_data(NULL), - errflag(err), - sio(anSio) { + callback(NULL), + callback_data(NULL), + errflag(err), + sio(anSio) { callback = cb; callback_data = cbdataReference(data); } StoreIOStateCb(const StoreIOStateCb &cb): - callback(NULL), - callback_data(NULL), - errflag(cb.errflag), - sio(cb.sio) { + callback(NULL), + callback_data(NULL), + errflag(cb.errflag), + sio(cb.sio) { callback = cb.callback; callback_data = cbdataReference(cb.callback_data); @@ -201,7 +201,7 @@ Rock::IoState::callBack(int errflag) theFile = NULL; AsyncCall::Pointer call = asyncCall(79,3, "SomeIoStateCloseCb", - StoreIOStateCb(callback, callback_data, errflag, this)); + StoreIOStateCb(callback, callback_data, errflag, this)); ScheduleCallHere(call); callback = NULL; diff --git a/src/fs/rock/RockIoState.h b/src/fs/rock/RockIoState.h index 33adf631ae..11b3ddd628 100644 --- a/src/fs/rock/RockIoState.h +++ b/src/fs/rock/RockIoState.h @@ -6,7 +6,8 @@ class DiskFile; -namespace Rock { +namespace Rock +{ class SwapDir; diff --git a/src/fs/rock/RockRebuild.cc b/src/fs/rock/RockRebuild.cc index ff4c5b84bc..c0f43ba75f 100644 --- a/src/fs/rock/RockRebuild.cc +++ b/src/fs/rock/RockRebuild.cc @@ -13,13 +13,13 @@ CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild); Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"), - sd(dir), - dbSize(0), - dbEntrySize(0), - dbEntryLimit(0), - fd(-1), - dbOffset(0), - filen(0) + sd(dir), + dbSize(0), + dbEntrySize(0), + dbEntryLimit(0), + fd(-1), + dbOffset(0), + filen(0) { assert(sd); memset(&counts, 0, sizeof(counts)); @@ -36,11 +36,12 @@ Rock::Rebuild::~Rebuild() /// prepares and initiates entry loading sequence void -Rock::Rebuild::start() { +Rock::Rebuild::start() +{ // in SMP mode, only the disker is responsible for populating the map if (UsingSmp() && !IamDiskProcess()) { debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" << - sd->index << " from " << sd->filePath); + sd->index << " from " << sd->filePath); mustStop("non-disker"); return; } @@ -57,7 +58,7 @@ Rock::Rebuild::start() { failure("cannot read db header", errno); dbOffset = SwapDir::HeaderSize; - filen = 0; + filen = 0; checkpoint(); } @@ -84,12 +85,13 @@ Rock::Rebuild::Steps(void *data) } void -Rock::Rebuild::steps() { +Rock::Rebuild::steps() +{ debugs(47,5, HERE << sd->index << " filen " << filen << " at " << - dbOffset << " <= " << dbSize); + dbOffset << " <= " << dbSize); // Balance our desire to maximize the number of entries processed at once - // (and, hence, minimize overheads and total rebuild time) with a + // (and, hence, minimize overheads and total rebuild time) with a // requirement to also process Coordinator events, disk I/Os, etc. 
const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms const timeval loopStart = current_time; @@ -111,7 +113,7 @@ Rock::Rebuild::steps() { const double elapsedMsec = tvSubMsec(loopStart, current_time); if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) { debugs(47, 5, HERE << "pausing after " << loaded << " entries in " << - elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry"); + elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry"); break; } } @@ -120,9 +122,10 @@ Rock::Rebuild::steps() { } void -Rock::Rebuild::doOneEntry() { +Rock::Rebuild::doOneEntry() +{ debugs(47,5, HERE << sd->index << " filen " << filen << " at " << - dbOffset << " <= " << dbSize); + dbOffset << " <= " << dbSize); ++counts.scancount; @@ -139,7 +142,7 @@ Rock::Rebuild::doOneEntry() { DbCellHeader header; if (buf.contentSize() < static_cast(sizeof(header))) { debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " << - "Ignoring truncated cache entry meta data at " << dbOffset); + "Ignoring truncated cache entry meta data at " << dbOffset); counts.invalid++; return; } @@ -147,7 +150,7 @@ Rock::Rebuild::doOneEntry() { if (!header.sane()) { debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " << - "Ignoring malformed cache entry meta data at " << dbOffset); + "Ignoring malformed cache entry meta data at " << dbOffset); counts.invalid++; return; } @@ -162,7 +165,7 @@ Rock::Rebuild::doOneEntry() { //sd->unlink(filen); leave garbage on disk, it should not hurt } return; - } + } assert(loadedE.swap_filen < dbEntryLimit); if (!storeRebuildKeepEntry(loadedE, key, counts)) @@ -175,17 +178,19 @@ Rock::Rebuild::doOneEntry() { } void -Rock::Rebuild::swanSong() { +Rock::Rebuild::swanSong() +{ debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " << - StoreController::store_dirs_rebuilding); + StoreController::store_dirs_rebuilding); --StoreController::store_dirs_rebuilding; storeRebuildComplete(&counts); } void -Rock::Rebuild::failure(const char *msg, int errNo) { +Rock::Rebuild::failure(const char *msg, int errNo) +{ debugs(47,5, HERE << sd->index << " filen " << filen << " at " << - dbOffset << " <= " << dbSize); + dbOffset << " <= " << dbSize); if (errNo) debugs(47,0, "Rock cache_dir rebuild failure: " << xstrerr(errNo)); @@ -193,5 +198,5 @@ Rock::Rebuild::failure(const char *msg, int errNo) { assert(sd); fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.", - sd->index, sd->filePath, msg); + sd->index, sd->filePath, msg); } diff --git a/src/fs/rock/RockRebuild.h b/src/fs/rock/RockRebuild.h index 0d0c556675..20d025156a 100644 --- a/src/fs/rock/RockRebuild.h +++ b/src/fs/rock/RockRebuild.h @@ -1,17 +1,18 @@ #ifndef SQUID_FS_ROCK_REBUILD_H #define SQUID_FS_ROCK_REBUILD_H -#include "config.h" #include "base/AsyncJob.h" #include "structs.h" -namespace Rock { +namespace Rock +{ class SwapDir; /// \ingroup Rock /// manages store rebuild process: loading meta information from db on disk -class Rebuild: public AsyncJob { +class Rebuild: public AsyncJob +{ public: Rebuild(SwapDir *dir); ~Rebuild(); diff --git a/src/fs/rock/RockStoreFileSystem.cc b/src/fs/rock/RockStoreFileSystem.cc index 35685a1205..9f7ef6b77f 100644 --- a/src/fs/rock/RockStoreFileSystem.cc +++ b/src/fs/rock/RockStoreFileSystem.cc @@ -4,6 +4,7 @@ * DEBUG: section 92 Storage File System */ +#include "config.h" #include "fs/rock/RockStoreFileSystem.h" #include "fs/rock/RockSwapDir.h" diff --git a/src/fs/rock/RockStoreFileSystem.h b/src/fs/rock/RockStoreFileSystem.h index 
cba06100ff..09931909df 100644 --- a/src/fs/rock/RockStoreFileSystem.h +++ b/src/fs/rock/RockStoreFileSystem.h @@ -3,7 +3,8 @@ #include "StoreFileSystem.h" -namespace Rock { +namespace Rock +{ /// \ingroup Rock, FileSystems class StoreFileSystem: public ::StoreFileSystem diff --git a/src/fs/rock/RockSwapDir.cc b/src/fs/rock/RockSwapDir.cc index 4de154b5d7..6277da17bc 100644 --- a/src/fs/rock/RockSwapDir.cc +++ b/src/fs/rock/RockSwapDir.cc @@ -36,7 +36,8 @@ Rock::SwapDir::~SwapDir() StoreSearch * Rock::SwapDir::search(String const url, HttpRequest *) { - assert(false); return NULL; // XXX: implement + assert(false); + return NULL; // XXX: implement } // called when Squid core needs a StoreEntry with a given key @@ -155,10 +156,10 @@ Rock::SwapDir::create() #endif if (res != 0) { debugs(47, DBG_CRITICAL, "Failed to create Rock db dir " << path << - ": " << xstrerror()); + ": " << xstrerror()); fatal("Rock Store db creation error"); - } - } + } + } #if SLOWLY_FILL_WITH_ZEROS /* TODO just set the file size */ @@ -170,22 +171,22 @@ Rock::SwapDir::create() for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) { if (write(swap, block, sizeof(block)) != sizeof(block)) { debugs(47,0, "Failed to create Rock Store db in " << filePath << - ": " << xstrerror()); + ": " << xstrerror()); fatal("Rock Store db creation error"); - } - } + } + } close(swap); #else const int swap = open(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600); if (swap < 0) { debugs(47,0, "Failed to initialize Rock Store db in " << filePath << - "; create error: " << xstrerror()); + "; create error: " << xstrerror()); fatal("Rock Store db creation error"); } if (ftruncate(swap, maxSize()) != 0) { debugs(47,0, "Failed to initialize Rock Store db in " << filePath << - "; truncate error: " << xstrerror()); + "; truncate error: " << xstrerror()); fatal("Rock Store db creation error"); } @@ -193,7 +194,7 @@ Rock::SwapDir::create() memset(header, '\0', sizeof(header)); if (write(swap, header, sizeof(header)) != sizeof(header)) { debugs(47,0, "Failed to initialize Rock Store db in " << filePath << - "; write error: " << xstrerror()); + "; write error: " << xstrerror()); fatal("Rock Store db initialization error"); } close(swap); @@ -310,12 +311,13 @@ Rock::SwapDir::validateOptions() debugs(47, 0, "\tusable db size: " << diskOffsetLimit() << " bytes"); debugs(47, 0, "\tdisk space waste: " << totalWaste << " bytes"); debugs(47, 0, "WARNING: Rock store config wastes space."); - } + } */ } void -Rock::SwapDir::rebuild() { +Rock::SwapDir::rebuild() +{ //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init() AsyncJob::Start(new Rebuild(this)); } @@ -326,8 +328,8 @@ bool Rock::SwapDir::addEntry(const int filen, const DbCellHeader &header, const StoreEntry &from) { debugs(47, 8, HERE << &from << ' ' << from.getMD5Text() << - ", filen="<< std::setfill('0') << std::hex << std::uppercase << - std::setw(8) << filen); + ", filen="<< std::setfill('0') << std::hex << std::uppercase << + std::setw(8) << filen); sfileno newLocation = 0; if (Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast(from.key), newLocation)) { @@ -409,8 +411,8 @@ Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreI sio->diskOffset = diskOffset(sio->swap_filen); debugs(47,5, HERE << "dir " << index << " created new filen " << - std::setfill('0') << std::hex << std::uppercase << std::setw(8) << - sio->swap_filen << std::dec << " at " << sio->diskOffset); + std::setfill('0') << std::hex << std::uppercase << 
std::setw(8) << + sio->swap_filen << std::dec << " at " << sio->diskOffset); assert(sio->diskOffset + payloadEnd <= diskOffsetLimit()); @@ -443,7 +445,7 @@ Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOS return NULL; } - if (e.swap_filen < 0) { + if (e.swap_filen < 0) { debugs(47,4, HERE << e); return NULL; } @@ -470,8 +472,8 @@ Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOS assert(sio->payloadEnd <= max_objsize); // the payload fits the slot debugs(47,5, HERE << "dir " << index << " has old filen: " << - std::setfill('0') << std::hex << std::uppercase << std::setw(8) << - sio->swap_filen); + std::setfill('0') << std::hex << std::uppercase << std::setw(8) << + sio->swap_filen); assert(slot->basics.swap_file_sz > 0); assert(slot->basics.swap_file_sz == e.swap_file_sz); @@ -494,8 +496,8 @@ Rock::SwapDir::ioCompletedNotification() xstrerror()); debugs(47, 2, "Rock cache_dir[" << index << "] limits: " << - std::setw(12) << maxSize() << " disk bytes and " << - std::setw(7) << map->entryLimit() << " entries"); + std::setw(12) << maxSize() << " disk bytes and " << + std::setw(7) << map->entryLimit() << " entries"); rebuild(); } @@ -557,17 +559,18 @@ Rock::SwapDir::full() const // storeSwapOutFileClosed calls this nethod on DISK_NO_SPACE_LEFT, // but it should not happen for us void -Rock::SwapDir::diskFull() { +Rock::SwapDir::diskFull() +{ debugs(20, DBG_IMPORTANT, "Internal ERROR: No space left with " << - "rock cache_dir: " << filePath); + "rock cache_dir: " << filePath); } /// purge while full(); it should be sufficient to purge just one void Rock::SwapDir::maintain() { - debugs(47,3, HERE << "cache_dir[" << index << "] guards: " << - !repl << !map << !full() << StoreController::store_dirs_rebuilding); + debugs(47,3, HERE << "cache_dir[" << index << "] guards: " << + !repl << !map << !full() << StoreController::store_dirs_rebuilding); if (!repl) return; // no means (cannot find a victim) @@ -596,19 +599,19 @@ Rock::SwapDir::maintain() for (; freed < maxFreed && full(); ++freed) { if (StoreEntry *e = walker->Next(walker)) e->release(); // will call our unlink() method - else + else break; // no more objects - } + } debugs(47,2, HERE << "Rock cache_dir[" << index << "] freed " << freed << - " scanned " << walker->scanned << '/' << walker->locked); + " scanned " << walker->scanned << '/' << walker->locked); walker->Done(walker); if (full()) { debugs(47,0, "ERROR: Rock cache_dir[" << index << "] " << - "is still full after freeing " << freed << " entries. A bug?"); - } + "is still full after freeing " << freed << " entries. 
A bug?"); + } } void @@ -671,7 +674,7 @@ Rock::SwapDir::statfs(StoreEntry &e) const if (limit > 0) { const int entryCount = map->entryCount(); storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n", - entryCount, (100.0 * entryCount / limit)); + entryCount, (100.0 * entryCount / limit)); if (limit < 100) { // XXX: otherwise too expensive to count Ipc::ReadWriteLockStats stats; @@ -679,10 +682,10 @@ Rock::SwapDir::statfs(StoreEntry &e) const stats.dump(e); } } - } + } storeAppendPrintf(&e, "Pending operations: %d out of %d\n", - store_open_disk_fd, Config.max_open_disk_fds); + store_open_disk_fd, Config.max_open_disk_fds); storeAppendPrintf(&e, "Flags:"); diff --git a/src/fs/rock/RockSwapDir.h b/src/fs/rock/RockSwapDir.h index 095dee5e12..30a33d5e02 100644 --- a/src/fs/rock/RockSwapDir.h +++ b/src/fs/rock/RockSwapDir.h @@ -11,7 +11,8 @@ class DiskFile; class ReadRequest; class WriteRequest; -namespace Rock { +namespace Rock +{ class Rebuild; diff --git a/src/fs/ufs/ufscommon.cc b/src/fs/ufs/ufscommon.cc index 6a5634ba4c..0adb4b18e6 100644 --- a/src/fs/ufs/ufscommon.cc +++ b/src/fs/ufs/ufscommon.cc @@ -405,7 +405,7 @@ RebuildState::rebuildFromDirectory() StoreEntry tmpe; const bool loaded = storeRebuildParseEntry(buf, tmpe, key, counts, - (int64_t)sb.st_size); + (int64_t)sb.st_size); file_close(fd); store_open_disk_fd--; diff --git a/src/ipc/AtomicWord.h b/src/ipc/AtomicWord.h index 815fcbb2f0..04f42fffb8 100644 --- a/src/ipc/AtomicWord.h +++ b/src/ipc/AtomicWord.h @@ -10,7 +10,8 @@ /// Supplies atomic operations for an integral Value in memory shared by kids. /// Used to implement non-blocking shared locks, queues, tables, and pools. template -class AtomicWordT { +class AtomicWordT +{ public: AtomicWordT() {} // leave value unchanged AtomicWordT(Value aValue): value(aValue) {} // XXX: unsafe @@ -40,10 +41,11 @@ private: enum { AtomicOperationsSupported = 1 }; #else -/// A wrapper to provide AtomicWordT API (and asserting implementation) +/// A wrapper to provide AtomicWordT API (and asserting implementation) /// where we do not support atomic operations. This avoids ifdefs in core code. template -class AtomicWordT { +class AtomicWordT +{ public: AtomicWordT() {} // leave value unchanged AtomicWordT(Value aValue): value(aValue) {} // XXX: unsafe @@ -55,11 +57,11 @@ public: Value operator --(int) { assert(false); return *this; } bool swap_if(const int comparand, const int replacement) - { assert(false); return false; } + { assert(false); return false; } /// v1 = value; value &= v2; return v1; Value fetchAndAnd(const Value v2) - { assert(false); return value; } + { assert(false); return value; } // TODO: no need for __sync_bool_compare_and_swap here? 
bool operator ==(int v2) { assert(false); return false; } diff --git a/src/ipc/Coordinator.cc b/src/ipc/Coordinator.cc index 35231c02d0..6379f4b59e 100644 --- a/src/ipc/Coordinator.cc +++ b/src/ipc/Coordinator.cc @@ -199,7 +199,7 @@ Ipc::Coordinator::handleSearchRequest(const Ipc::StrandSearchRequest &request) searchers.push_back(request); debugs(54, 3, HERE << "cannot yet tell kid" << request.requestorId << - " who " << request.tag << " is"); + " who " << request.tag << " is"); } void @@ -207,7 +207,7 @@ Ipc::Coordinator::notifySearcher(const Ipc::StrandSearchRequest &request, const StrandCoord& strand) { debugs(54, 3, HERE << "tell kid" << request.requestorId << " that " << - request.tag << " is kid" << strand.kidId); + request.tag << " is kid" << strand.kidId); const StrandSearchResponse response(strand); TypedMsgHdr message; response.pack(message); diff --git a/src/ipc/Kid.h b/src/ipc/Kid.h index 7543c7952a..a6d6e5f9f2 100644 --- a/src/ipc/Kid.h +++ b/src/ipc/Kid.h @@ -88,7 +88,7 @@ private: // TODO: processes may not be kids; is there a better place to put this? -/// process kinds +/// process kinds typedef enum { pkOther = 0, ///< we do not know or do not care pkCoordinator = 1, ///< manages all other kids diff --git a/src/ipc/Queue.cc b/src/ipc/Queue.cc index 41e240674e..12eb5e6b2f 100644 --- a/src/ipc/Queue.cc +++ b/src/ipc/Queue.cc @@ -69,8 +69,8 @@ Ipc::QueueReaders::SharedMemorySize(const int capacity) // OneToOneUniQueue Ipc::OneToOneUniQueue::OneToOneUniQueue(const unsigned int aMaxItemSize, const int aCapacity): - theIn(0), theOut(0), theSize(0), theMaxItemSize(aMaxItemSize), - theCapacity(aCapacity) + theIn(0), theOut(0), theSize(0), theMaxItemSize(aMaxItemSize), + theCapacity(aCapacity) { Must(theMaxItemSize > 0); Must(theCapacity > 0); @@ -135,11 +135,11 @@ Ipc::FewToFewBiQueue::Init(const String &id, const int groupASize, const int gro } Ipc::FewToFewBiQueue::FewToFewBiQueue(const String &id, const Group aLocalGroup, const int aLocalProcessId): - metadata(shm_old(Metadata)(MetadataId(id).termedBuf())), - queues(shm_old(OneToOneUniQueues)(QueuesId(id).termedBuf())), - readers(shm_old(QueueReaders)(ReadersId(id).termedBuf())), - theLocalGroup(aLocalGroup), theLocalProcessId(aLocalProcessId), - theLastPopProcessId(readers->theCapacity) + metadata(shm_old(Metadata)(MetadataId(id).termedBuf())), + queues(shm_old(OneToOneUniQueues)(QueuesId(id).termedBuf())), + readers(shm_old(QueueReaders)(ReadersId(id).termedBuf())), + theLocalGroup(aLocalGroup), theLocalProcessId(aLocalProcessId), + theLastPopProcessId(readers->theCapacity) { Must(queues->theCapacity == metadata->theGroupASize * metadata->theGroupBSize * 2); Must(readers->theCapacity == metadata->theGroupASize + metadata->theGroupBSize); @@ -154,10 +154,10 @@ Ipc::FewToFewBiQueue::validProcessId(const Group group, const int processId) con switch (group) { case groupA: return metadata->theGroupAIdOffset <= processId && - processId < metadata->theGroupAIdOffset + metadata->theGroupASize; + processId < metadata->theGroupAIdOffset + metadata->theGroupASize; case groupB: return metadata->theGroupBIdOffset <= processId && - processId < metadata->theGroupBIdOffset + metadata->theGroupBSize; + processId < metadata->theGroupBIdOffset + metadata->theGroupBSize; } return false; } @@ -201,8 +201,8 @@ Ipc::FewToFewBiQueue::reader(const Group group, const int processId) { Must(validProcessId(group, processId)); const int index = group == groupA ? 
- processId - metadata->theGroupAIdOffset : - metadata->theGroupASize + processId - metadata->theGroupBIdOffset; + processId - metadata->theGroupAIdOffset : + metadata->theGroupASize + processId - metadata->theGroupBIdOffset; return readers->theReaders[index]; } @@ -222,17 +222,17 @@ Ipc::FewToFewBiQueue::clearReaderSignal(const int remoteProcessId) } Ipc::FewToFewBiQueue::Metadata::Metadata(const int aGroupASize, const int aGroupAIdOffset, const int aGroupBSize, const int aGroupBIdOffset): - theGroupASize(aGroupASize), theGroupAIdOffset(aGroupAIdOffset), - theGroupBSize(aGroupBSize), theGroupBIdOffset(aGroupBIdOffset) + theGroupASize(aGroupASize), theGroupAIdOffset(aGroupAIdOffset), + theGroupBSize(aGroupBSize), theGroupBIdOffset(aGroupBIdOffset) { Must(theGroupASize > 0); Must(theGroupBSize > 0); } Ipc::FewToFewBiQueue::Owner::Owner(const String &id, const int groupASize, const int groupAIdOffset, const int groupBSize, const int groupBIdOffset, const unsigned int maxItemSize, const int capacity): - metadataOwner(shm_new(Metadata)(MetadataId(id).termedBuf(), groupASize, groupAIdOffset, groupBSize, groupBIdOffset)), - queuesOwner(shm_new(OneToOneUniQueues)(QueuesId(id).termedBuf(), groupASize*groupBSize*2, maxItemSize, capacity)), - readersOwner(shm_new(QueueReaders)(ReadersId(id).termedBuf(), groupASize+groupBSize)) + metadataOwner(shm_new(Metadata)(MetadataId(id).termedBuf(), groupASize, groupAIdOffset, groupBSize, groupBIdOffset)), + queuesOwner(shm_new(OneToOneUniQueues)(QueuesId(id).termedBuf(), groupASize*groupBSize*2, maxItemSize, capacity)), + readersOwner(shm_new(QueueReaders)(ReadersId(id).termedBuf(), groupASize+groupBSize)) { } diff --git a/src/ipc/Queue.h b/src/ipc/Queue.h index c211031fec..225d1de00c 100644 --- a/src/ipc/Queue.h +++ b/src/ipc/Queue.h @@ -15,11 +15,13 @@ class String; -namespace Ipc { +namespace Ipc +{ /// State of the reading end of a queue (i.e., of the code calling pop()). /// Multiple queues attached to one reader share this state. -class QueueReader { +class QueueReader +{ public: QueueReader(); // the initial state is "blocked without a signal" @@ -49,7 +51,8 @@ public: }; /// shared array of QueueReaders -class QueueReaders { +class QueueReaders +{ public: QueueReaders(const int aCapacity); size_t sharedMemorySize() const; @@ -69,7 +72,8 @@ public: * queue is full, the writer will just not push and come back later (with a * different value). We can add support for blocked writers if needed. */ -class OneToOneUniQueue { +class OneToOneUniQueue +{ public: // pop() and push() exceptions; TODO: use TextException instead class Full {}; @@ -110,7 +114,8 @@ private: }; /// shared array of OneToOneUniQueues -class OneToOneUniQueues { +class OneToOneUniQueues +{ public: OneToOneUniQueues(const int aCapacity, const unsigned int maxItemSize, const int queueCapacity); @@ -135,7 +140,8 @@ public: * communicate. Process in each group has a unique integer ID in * [groupIdOffset, groupIdOffset + groupSize) range. 
*/ -class FewToFewBiQueue { +class FewToFewBiQueue +{ public: typedef OneToOneUniQueue::Full Full; typedef OneToOneUniQueue::ItemTooLarge ItemTooLarge; @@ -154,7 +160,8 @@ private: }; public: - class Owner { + class Owner + { public: Owner(const String &id, const int groupASize, const int groupAIdOffset, const int groupBSize, const int groupBIdOffset, const unsigned int maxItemSize, const int capacity); ~Owner(); diff --git a/src/ipc/ReadWriteLock.cc b/src/ipc/ReadWriteLock.cc index 9fef9c1706..f319264f88 100644 --- a/src/ipc/ReadWriteLock.cc +++ b/src/ipc/ReadWriteLock.cc @@ -71,7 +71,7 @@ Ipc::ReadWriteLockStats::ReadWriteLockStats() { memset(this, 0, sizeof(*this)); } - + void Ipc::ReadWriteLockStats::dump(StoreEntry &e) const { @@ -81,17 +81,17 @@ Ipc::ReadWriteLockStats::dump(StoreEntry &e) const return; storeAppendPrintf(&e, "Reading: %9d %6.2f%%\n", - readable, (100.0 * readable / count)); + readable, (100.0 * readable / count)); storeAppendPrintf(&e, "Writing: %9d %6.2f%%\n", - writeable, (100.0 * writeable / count)); + writeable, (100.0 * writeable / count)); storeAppendPrintf(&e, "Idle: %9d %6.2f%%\n", - idle, (100.0 * idle / count)); + idle, (100.0 * idle / count)); if (readers || writers) { const int locked = readers + writers; storeAppendPrintf(&e, "Readers: %9d %6.2f%%\n", - readers, (100.0 * readers / locked)); + readers, (100.0 * readers / locked)); storeAppendPrintf(&e, "Writers: %9d %6.2f%%\n", - writers, (100.0 * writers / locked)); + writers, (100.0 * writers / locked)); } } diff --git a/src/ipc/ReadWriteLock.h b/src/ipc/ReadWriteLock.h index 91507dee46..4d8f57202c 100644 --- a/src/ipc/ReadWriteLock.h +++ b/src/ipc/ReadWriteLock.h @@ -5,12 +5,14 @@ class StoreEntry; -namespace Ipc { +namespace Ipc +{ class ReadWriteLockStats; /// an atomic readers-writer or shared-exclusive lock suitable for maps/tables -class ReadWriteLock { +class ReadWriteLock +{ public: // default constructor is OK because of shared memory zero-initialization @@ -30,7 +32,8 @@ public: /// approximate stats of a set of ReadWriteLocks -class ReadWriteLockStats { +class ReadWriteLockStats +{ public: ReadWriteLockStats(); diff --git a/src/ipc/StoreMap.cc b/src/ipc/StoreMap.cc index 3fbe6dc7ea..5499decd7d 100644 --- a/src/ipc/StoreMap.cc +++ b/src/ipc/StoreMap.cc @@ -26,7 +26,7 @@ Ipc::StoreMap::Init(const char *const path, const int limit) } Ipc::StoreMap::StoreMap(const char *const aPath): cleaner(NULL), path(aPath), - shared(shm_old(Shared)(aPath)) + shared(shm_old(Shared)(aPath)) { assert(shared->limit > 0); // we should not be created otherwise debugs(54, 5, HERE << "attached map [" << path << "] created: " << @@ -129,7 +129,7 @@ void Ipc::StoreMap::free(const sfileno fileno) { debugs(54, 5, HERE << " marking slot at " << fileno << " to be freed in" - " map [" << path << ']'); + " map [" << path << ']'); assert(valid(fileno)); Slot &s = shared->slots[fileno]; @@ -307,7 +307,7 @@ Ipc::StoreMapSlot::set(const StoreEntry &from) /* Ipc::StoreMap::Shared */ Ipc::StoreMap::Shared::Shared(const int aLimit, const size_t anExtrasSize): - limit(aLimit), extrasSize(anExtrasSize), count(0) + limit(aLimit), extrasSize(anExtrasSize), count(0) { } diff --git a/src/ipc/StoreMap.h b/src/ipc/StoreMap.h index ce17077eea..54138cfad9 100644 --- a/src/ipc/StoreMap.h +++ b/src/ipc/StoreMap.h @@ -5,10 +5,12 @@ #include "ipc/mem/Pointer.h" #include "typedefs.h" -namespace Ipc { +namespace Ipc +{ /// a StoreMap element, holding basic shareable StoreEntry info -class StoreMapSlot { +class StoreMapSlot +{ public: 
StoreMapSlot(); @@ -33,20 +35,20 @@ public: uint64_t swap_file_sz; uint16_t refcount; uint16_t flags; - } basics; + } basics; /// possible persistent states typedef enum { Empty, ///< ready for writing, with nothing of value Writeable, ///< transitions from Empty to Readable Readable, ///< ready for reading - } State; + } State; State state; ///< current state }; class StoreMapCleaner; -/// map of StoreMapSlots indexed by their keys, with read/write slot locking +/// map of StoreMapSlots indexed by their keys, with read/write slot locking /// kids extend to store custom data class StoreMap { @@ -54,8 +56,7 @@ public: typedef StoreMapSlot Slot; private: - struct Shared - { + struct Shared { Shared(const int aLimit, const size_t anExtrasSize); size_t sharedMemorySize() const; static size_t SharedMemorySize(const int limit, const size_t anExtrasSize); @@ -115,7 +116,7 @@ private: int slotIndexByKey(const cache_key *const key) const; Slot &slotByKey(const cache_key *const key); - Slot *openForReading(Slot &s); + Slot *openForReading(Slot &s); void abortWriting(const sfileno fileno); void freeIfNeeded(Slot &s); void freeLocked(Slot &s, bool keepLocked); @@ -166,7 +167,7 @@ StoreMapWithExtras::Init(const char *const path, const int limit) template StoreMapWithExtras::StoreMapWithExtras(const char *const path): - StoreMap(path) + StoreMap(path) { const size_t sharedSizeWithoutExtras = Shared::SharedMemorySize(entryLimit(), 0); diff --git a/src/ipc/StrandCoord.cc b/src/ipc/StrandCoord.cc index 6116437087..c10c4839db 100644 --- a/src/ipc/StrandCoord.cc +++ b/src/ipc/StrandCoord.cc @@ -38,7 +38,7 @@ void Ipc::StrandCoord::pack(TypedMsgHdr &hdrMsg) const Ipc::HereIamMessage::HereIamMessage(const StrandCoord &aStrand): - strand(aStrand) + strand(aStrand) { } diff --git a/src/ipc/StrandSearch.cc b/src/ipc/StrandSearch.cc index a5ff3c8afd..5b281d92e7 100644 --- a/src/ipc/StrandSearch.cc +++ b/src/ipc/StrandSearch.cc @@ -17,7 +17,7 @@ Ipc::StrandSearchRequest::StrandSearchRequest(): requestorId(-1) } Ipc::StrandSearchRequest::StrandSearchRequest(const TypedMsgHdr &hdrMsg): - requestorId(-1) + requestorId(-1) { hdrMsg.checkType(mtStrandSearchRequest); hdrMsg.getPod(requestorId); @@ -35,7 +35,7 @@ void Ipc::StrandSearchRequest::pack(TypedMsgHdr &hdrMsg) const /* StrandSearchResponse */ Ipc::StrandSearchResponse::StrandSearchResponse(const Ipc::StrandCoord &aStrand): - strand(aStrand) + strand(aStrand) { } diff --git a/src/ipc/mem/Page.h b/src/ipc/mem/Page.h index b2f69df911..cb48ea447d 100644 --- a/src/ipc/mem/Page.h +++ b/src/ipc/mem/Page.h @@ -10,12 +10,15 @@ #include #endif -namespace Ipc { +namespace Ipc +{ -namespace Mem { +namespace Mem +{ /// Shared memory page identifier, address, or handler -class PageId { +class PageId +{ public: PageId(): pool(0), number(0), purpose(maxPurpose) {} diff --git a/src/ipc/mem/PagePool.cc b/src/ipc/mem/PagePool.cc index 36b5231775..e903fec4fb 100644 --- a/src/ipc/mem/PagePool.cc +++ b/src/ipc/mem/PagePool.cc @@ -23,11 +23,11 @@ Ipc::Mem::PagePool::Init(const char *const id, const unsigned int capacity, cons } Ipc::Mem::PagePool::PagePool(const char *const id): - pageIndex(shm_old(PageStack)(id)), - theLevels(reinterpret_cast( - reinterpret_cast(pageIndex.getRaw()) + - pageIndex->stackSize())), - theBuf(reinterpret_cast(theLevels + PageId::maxPurpose)) + pageIndex(shm_old(PageStack)(id)), + theLevels(reinterpret_cast( + reinterpret_cast(pageIndex.getRaw()) + + pageIndex->stackSize())), + theBuf(reinterpret_cast(theLevels + PageId::maxPurpose)) { } diff --git 
a/src/ipc/mem/PagePool.h b/src/ipc/mem/PagePool.h index 44a59740ad..e6d808a639 100644 --- a/src/ipc/mem/PagePool.h +++ b/src/ipc/mem/PagePool.h @@ -10,14 +10,17 @@ #include "ipc/mem/PageStack.h" #include "ipc/mem/Pointer.h" -namespace Ipc { +namespace Ipc +{ -namespace Mem { +namespace Mem +{ /// Atomic container of shared memory pages. Implemented using a collection of /// Segments, each with a PageStack index of free pages. All pools must be /// created by a single process. -class PagePool { +class PagePool +{ public: typedef Ipc::Mem::Owner Owner; diff --git a/src/ipc/mem/PageStack.cc b/src/ipc/mem/PageStack.cc index 07789d55ad..d3c4d7ae18 100644 --- a/src/ipc/mem/PageStack.cc +++ b/src/ipc/mem/PageStack.cc @@ -16,9 +16,9 @@ const Ipc::Mem::PageStack::Value Writable = 0; Ipc::Mem::PageStack::PageStack(const uint32_t aPoolId, const unsigned int aCapacity, const size_t aPageSize): - thePoolId(aPoolId), theCapacity(aCapacity), thePageSize(aPageSize), - theSize(theCapacity), - theLastReadable(prev(theSize)), theFirstWritable(next(theLastReadable)) + thePoolId(aPoolId), theCapacity(aCapacity), thePageSize(aPageSize), + theSize(theCapacity), + theLastReadable(prev(theSize)), theFirstWritable(next(theLastReadable)) { // initially, all pages are free for (Offset i = 0; i < theSize; ++i) @@ -101,7 +101,7 @@ bool Ipc::Mem::PageStack::pageIdIsValid(const PageId &page) const { return page.pool == thePoolId && page.number != Writable && - page.number <= capacity(); + page.number <= capacity(); } size_t diff --git a/src/ipc/mem/PageStack.h b/src/ipc/mem/PageStack.h index d64cbd723d..ddb1ba9ed0 100644 --- a/src/ipc/mem/PageStack.h +++ b/src/ipc/mem/PageStack.h @@ -8,16 +8,19 @@ #include "ipc/AtomicWord.h" -namespace Ipc { +namespace Ipc +{ -namespace Mem { +namespace Mem +{ class PageId; /// Atomic container of "free" page numbers inside a single SharedMemory space. /// Assumptions: all page numbers are unique, positive, have an known maximum, /// and can be temporary unavailable as long as they are never trully lost. -class PageStack { +class PageStack +{ public: typedef uint32_t Value; ///< stack item type (a free page number) diff --git a/src/ipc/mem/Pages.cc b/src/ipc/mem/Pages.cc index ceed891f99..085abd7a7e 100644 --- a/src/ipc/mem/Pages.cc +++ b/src/ipc/mem/Pages.cc @@ -22,7 +22,8 @@ static Ipc::Mem::PagePool *ThePagePool = 0; // TODO: make configurable to avoid waste when mem-cached objects are small/big size_t -Ipc::Mem::PageSize() { +Ipc::Mem::PageSize() +{ return 32*1024; } @@ -30,7 +31,7 @@ bool Ipc::Mem::GetPage(const PageId::Purpose purpose, PageId &page) { return ThePagePool && PagesAvailable(purpose) > 0 ? 
diff --git a/src/ipc/mem/Pages.cc b/src/ipc/mem/Pages.cc
index ceed891f99..085abd7a7e 100644
--- a/src/ipc/mem/Pages.cc
+++ b/src/ipc/mem/Pages.cc
@@ -22,7 +22,8 @@ static Ipc::Mem::PagePool *ThePagePool = 0;

 // TODO: make configurable to avoid waste when mem-cached objects are small/big
 size_t
-Ipc::Mem::PageSize() {
+Ipc::Mem::PageSize()
+{
     return 32*1024;
 }

@@ -30,7 +31,7 @@ bool
 Ipc::Mem::GetPage(const PageId::Purpose purpose, PageId &page)
 {
     return ThePagePool && PagesAvailable(purpose) > 0 ?
-        ThePagePool->get(purpose, page) : false;
+           ThePagePool->get(purpose, page) : false;
 }

 void
diff --git a/src/ipc/mem/Pages.h b/src/ipc/mem/Pages.h
index e447c51f20..aeea1890e1 100644
--- a/src/ipc/mem/Pages.h
+++ b/src/ipc/mem/Pages.h
@@ -8,9 +8,11 @@

 #include "ipc/mem/Page.h"

-namespace Ipc {
+namespace Ipc
+{

-namespace Mem {
+namespace Mem
+{

 /* Single page manipulation */

diff --git a/src/ipc/mem/Pointer.h b/src/ipc/mem/Pointer.h
index 01a606d746..324d38adb7 100644
--- a/src/ipc/mem/Pointer.h
+++ b/src/ipc/mem/Pointer.h
@@ -10,9 +10,11 @@
 #include "ipc/mem/Segment.h"
 #include "RefCount.h"

-namespace Ipc {
+namespace Ipc
+{

-namespace Mem {
+namespace Mem
+{

 /// allocates/deallocates shared memory; creates and later destroys a
 /// Class object using that memory
@@ -86,7 +88,7 @@ public:

 template
 Owner::Owner(const char *const id, const off_t sharedSize):
-    theSegment(id), theObject(NULL)
+        theSegment(id), theObject(NULL)
 {
     theSegment.create(sharedSize);
     Must(theSegment.mem());
diff --git a/src/ipc/mem/Segment.cc b/src/ipc/mem/Segment.cc
index 545a158607..424f8c4621 100644
--- a/src/ipc/mem/Segment.cc
+++ b/src/ipc/mem/Segment.cc
@@ -18,12 +18,13 @@
 #include

 Ipc::Mem::Segment::Segment(const char *const id):
-    theName(GenerateName(id)), theFD(-1), theMem(NULL),
-    theSize(0), theReserved(0), doUnlink(false)
+        theName(GenerateName(id)), theFD(-1), theMem(NULL),
+        theSize(0), theReserved(0), doUnlink(false)
 {
 }

-Ipc::Mem::Segment::~Segment() {
+Ipc::Mem::Segment::~Segment()
+{
     if (theFD >= 0) {
         detach();
         if (close(theFD) != 0)
@@ -34,7 +35,8 @@ Ipc::Mem::Segment::~Segment() {
 }

 bool
-Ipc::Mem::Segment::Enabled() {
+Ipc::Mem::Segment::Enabled()
+{
 #if HAVE_SHM
     return true;
 #else
diff --git a/src/ipc/mem/Segment.h b/src/ipc/mem/Segment.h
index 792ab9f75d..3becf7426e 100644
--- a/src/ipc/mem/Segment.h
+++ b/src/ipc/mem/Segment.h
@@ -8,12 +8,15 @@

 #include "SquidString.h"

-namespace Ipc {
+namespace Ipc
+{

-namespace Mem {
+namespace Mem
+{

 /// POSIX shared memory segment
-class Segment {
+class Segment
+{
 public:
     /// Create a shared memory segment.
     Segment(const char *const id);
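
Segment, reformatted above, wraps a POSIX shared memory segment. As a rough illustration of the system calls such a wrapper sits on (shm_open, ftruncate, mmap, shm_unlink), here is a minimal stand-alone sketch; it is not Squid's Segment class, and the segment name "/demo-segment" is made up. The 32KB size only echoes the Ipc::Mem::PageSize() value visible above.

    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        const char *name = "/demo-segment";  // hypothetical segment name
        const off_t size = 32 * 1024;        // one page-sized chunk

        const int fd = shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
        if (fd < 0) {
            perror("shm_open");
            return 1;
        }

        if (ftruncate(fd, size) != 0) {      // reserve the requested size
            perror("ftruncate");
            return 1;
        }

        void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (mem == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        memcpy(mem, "hello", 6);             // any cooperating process that maps
                                             // the same name sees this write

        munmap(mem, size);
        close(fd);
        shm_unlink(name);                    // remove the name when done with it
        return 0;
    }

On some platforms this needs -lrt at link time, and Segment::Enabled() above is how the code reports whether the facility was compiled in at all.
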
diff --git a/src/main.cc b/src/main.cc
index bd83f522b4..546eb92bed 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -1247,7 +1247,7 @@ SquidMainSafe(int argc, char **argv)
         return SquidMain(argc, argv);
     } catch (const std::exception &e) {
         debugs(1, DBG_CRITICAL, "FATAL: dying from an unhandled exception: " <<
-            e.what());
+               e.what());
         throw;
     } catch (...) {
         debugs(1, DBG_CRITICAL, "FATAL: dying from an unhandled exception.");
@@ -1269,11 +1269,9 @@ ConfigureCurrentKid(const char *processName)
         xstrncpy(TheKidName, processName + 1, nameLen + 1);
         if (!strcmp(TheKidName, "squid-coord"))
             TheProcessKind = pkCoordinator;
-        else
-            if (!strcmp(TheKidName, "squid"))
+        else if (!strcmp(TheKidName, "squid"))
             TheProcessKind = pkWorker;
-        else
-            if (!strcmp(TheKidName, "squid-disk"))
+        else if (!strcmp(TheKidName, "squid-disk"))
             TheProcessKind = pkDisker;
         else
             TheProcessKind = pkOther; // including coordinator
@@ -1759,7 +1757,7 @@ watch_child(char *argv[])
             kid.start(pid);
             syslog(LOG_NOTICE, "Squid Parent: %s process %d started",
-                kid.name().termedBuf(), pid);
+                   kid.name().termedBuf(), pid);
         }

         /* parent */
diff --git a/src/stat.cc b/src/stat.cc
index 6f0980f106..b1b7b3df35 100644
--- a/src/stat.cc
+++ b/src/stat.cc
@@ -557,7 +557,7 @@ GetInfo(Mgr::InfoActionData& stats)

     stats.n_disk_objects = Store::Root().currentCount();
     stats.objects_size = stats.n_disk_objects > 0 ?
-        stats.store_swap_size / stats.n_disk_objects : 0.0;
+                         stats.store_swap_size / stats.n_disk_objects : 0.0;

     stats.unlink_requests = statCounter.unlink.requests;
diff --git a/src/store.cc b/src/store.cc
index 72a127e940..1ced0bbcfe 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -550,8 +550,7 @@ StoreEntry::unlock()
     assert(storePendingNClients(this) == 0);

-    if (EBIT_TEST(flags, RELEASE_REQUEST))
-    {
+    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
         this->release();
         return 0;
     }
@@ -1449,7 +1448,7 @@ StoreEntry::memoryCachable() const
         int64_t expectedSize = mem_obj->expectedReplySize();
         // objects of unknown size are not allowed into memory cache, for now
         if (expectedSize < 0 ||
-            expectedSize > static_cast(Config.Store.maxInMemObjSize))
+                expectedSize > static_cast(Config.Store.maxInMemObjSize))
             return 0;
     }

@@ -1858,7 +1857,7 @@ StoreEntry::startWriting()
     assert (isEmpty());
     assert(mem_obj);
-
+
     const HttpReply *rep = getReply();
     assert(rep);

@@ -1944,7 +1943,7 @@ StoreEntry::swapoutPossible()
         debugs(20, 7, "storeSwapOut: expectedEnd = " << expectedEnd);
         if (expectedEnd > store_maxobjsize) {
             debugs(20, 3, "storeSwapOut: will not fit: " << expectedEnd <<
-                " > " << store_maxobjsize);
+                   " > " << store_maxobjsize);
             decision = MemObject::SwapOut::swImpossible;
             return false; // known to outgrow the limit eventually
         }
@@ -1953,7 +1952,7 @@ StoreEntry::swapoutPossible()
         const int64_t currentEnd = mem_obj->endOffset();
         if (currentEnd > store_maxobjsize) {
             debugs(20, 3, "storeSwapOut: does not fit: " << currentEnd <<
-                " > " << store_maxobjsize);
+                   " > " << store_maxobjsize);
             decision = MemObject::SwapOut::swImpossible;
             return false; // already does not fit and may only get bigger
         }
@@ -1961,7 +1960,7 @@ StoreEntry::swapoutPossible()
         // prevent default swPossible answer for yet unknown length
         if (expectedEnd < 0) {
             debugs(20, 3, "storeSwapOut: wait for more info: " <<
-                store_maxobjsize);
+                   store_maxobjsize);
             return false; // may fit later, but will be rejected now
         }
     }
@@ -2118,8 +2117,8 @@ StoreEntry::isAccepting() const
 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
 {
     return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
-        e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
-        e.swap_status;
+           e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
+           e.swap_status;
 }

 /* NullStoreEntry */
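
The memoryCachable() and swapoutPossible() hunks above only re-wrap long lines, but the rule they carry is simple: an object whose expected size is unknown (negative) or larger than the configured limit is kept out of the memory cache, or marked impossible to swap out. A stand-alone restatement of that check follows; the function name is invented, and the limits in the real code are Config.Store.maxInMemObjSize and store_maxobjsize.

    #include <cstdint>

    // May an object of the given expected size enter a size-limited cache?
    // A negative expectedSize means the final size is not known yet, which the
    // memory-cache check above treats as a rejection.
    static bool fitsSizeLimit(int64_t expectedSize, uint64_t maxObjectSize)
    {
        if (expectedSize < 0)
            return false;
        return static_cast<uint64_t>(expectedSize) <= maxObjectSize;
    }

    // usage sketch (approximates the hunk above, not exact Squid code):
    //   if (!fitsSizeLimit(mem_obj->expectedReplySize(), maxInMemObjSize))
    //       return 0;  // not memory-cachable
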
diff --git a/src/store_dir.cc b/src/store_dir.cc
index f5bb6e231f..84223a1b11 100644
--- a/src/store_dir.cc
+++ b/src/store_dir.cc
@@ -75,7 +75,7 @@ static STDIRSELECT storeDirSelectSwapDirLeastLoad;
 int StoreController::store_dirs_rebuilding = 1;

 StoreController::StoreController() : swapDir (new StoreHashIndex())
-    , memStore(NULL)
+        , memStore(NULL)
 {}

 StoreController::~StoreController()
@@ -736,14 +736,14 @@ StoreController::get(const cache_key *key)
             if (StoreEntry *e = sd->get(key)) {
                 debugs(20, 3, HERE << "cache_dir " << idx <<
-                    " got cached entry: " << *e);
+                       " got cached entry: " << *e);
                 return e;
             }
         }
     }

     debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
-        " cache_dirs have " << storeKeyText(key));
+           " cache_dirs have " << storeKeyText(key));
     return NULL;
 }

@@ -762,8 +762,8 @@ StoreController::handleIdleEntry(StoreEntry &e)
         // leave keepInLocalMemory false; memStore maintains its own cache
     } else {
         keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
-            // the local memory cache is not overflowing
-            (mem_node::InUseCount() <= store_pages_max);
+                            // the local memory cache is not overflowing
+                            (mem_node::InUseCount() <= store_pages_max);
     }

     // An idle, unlocked entry that belongs to a SwapDir which controls
@@ -874,7 +874,7 @@ StoreHashIndex::init()
     store_hash_buckets = storeKeyHashBuckets(buckets);
     debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
     debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
-        (Config.memShared ? " [shared]" : ""));
+           (Config.memShared ? " [shared]" : ""));
     debugs(20, 1, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

     store_table = hash_create(storeKeyHashCmp,
diff --git a/src/store_rebuild.cc b/src/store_rebuild.cc
index 40b188d3eb..9962646f51 100644
--- a/src/store_rebuild.cc
+++ b/src/store_rebuild.cc
@@ -295,7 +295,7 @@ storeRebuildLoadEntry(int fd, int diskIndex, MemBuf &buf,
     if (len < 0) {
         const int xerrno = errno;
         debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << diskIndex << "]: " <<
-            "Ignoring cached entry after meta data read failure: " << xstrerr(xerrno));
+               "Ignoring cached entry after meta data read failure: " << xstrerr(xerrno));
         return false;
     }

@@ -355,8 +355,7 @@ storeRebuildParseEntry(MemBuf &buf, StoreEntry &tmpe, cache_key *key,
                    "SIZE MISMATCH " << tmpe.swap_file_sz << "!=" << expectedSize);
             return false;
         }
-    } else
-        if (tmpe.swap_file_sz <= 0) {
+    } else if (tmpe.swap_file_sz <= 0) {
         debugs(47, DBG_IMPORTANT, "WARNING: Ignoring cache entry with " <<
                "unknown size: " << tmpe);
         return false;
diff --git a/src/store_swapout.cc b/src/store_swapout.cc
index 97deed21f5..aa4eefdb79 100644
--- a/src/store_swapout.cc
+++ b/src/store_swapout.cc
@@ -213,7 +213,7 @@ StoreEntry::swapOut()
     if (store_status != STORE_OK) {
         const int64_t expectedSize = mem_obj->expectedReplySize();
         const int64_t maxKnownSize = expectedSize < 0 ?
-            swapout_maxsize : expectedSize;
+                                     swapout_maxsize : expectedSize;
         debugs(20, 7, HERE << "storeSwapOut: maxKnownSize= " << maxKnownSize);

         if (maxKnownSize < store_maxobjsize) {
@@ -226,9 +226,9 @@ StoreEntry::swapOut()
              * Should we add an option to limit this memory consumption?
              */
             debugs(20, 5, "storeSwapOut: Deferring swapout start for " <<
-                (store_maxobjsize - maxKnownSize) << " bytes");
+                   (store_maxobjsize - maxKnownSize) << " bytes");
             return;
-        }
+        }
     }

     // TODO: it is better to trim as soon as we swap something out, not before
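
The StoreEntry::swapOut() hunk above re-indents the deferral decision: while an entry is still being received, the code takes the expected reply size when it is known, otherwise the swapout_maxsize value it substitutes in its place, and defers starting the swapout while that best-known size is still below the per-object limit. A stand-alone restatement under those assumptions (function and parameter names are ours, not Squid's):

    #include <cstdint>

    // Should swapout start be deferred? Mirrors the maxKnownSize logic above.
    //   stillReceiving - the original checks store_status != STORE_OK
    //   expectedSize   - expected reply size, negative when unknown
    //   fallbackSize   - the value used instead when the size is unknown
    //                    (swapout_maxsize in the hunk above)
    //   maxObjectSize  - the per-object limit (store_maxobjsize)
    static bool deferSwapoutStart(bool stillReceiving, int64_t expectedSize,
                                  int64_t fallbackSize, int64_t maxObjectSize)
    {
        if (!stillReceiving)
            return false;  // the object is complete; nothing left to wait for
        const int64_t maxKnownSize = expectedSize < 0 ? fallbackSize : expectedSize;
        return maxKnownSize < maxObjectSize;
    }
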
diff --git a/src/structs.h b/src/structs.h
index 74af3b8f54..7aada04b95 100644
--- a/src/structs.h
+++ b/src/structs.h
@@ -149,7 +149,8 @@ class SwapDir;
 /// Used for boolean enabled/disabled options with complex default logic.
 /// Allows Squid to compute the right default after configuration.
 /// Checks that not-yet-defined option values are not used.
-class YesNoNone {
+class YesNoNone
+{
 // TODO: generalize to non-boolean option types
 public:
     YesNoNone(): option(0) {}
@@ -157,7 +158,7 @@ public:
     /// returns true iff enabled; asserts if the option has not been configured
     operator void *() const; // TODO: use a fancy/safer version of the operator

-    /// enables or disables the option;
+    /// enables or disables the option;
     void configure(bool beSet);

     /// whether the option was enabled or disabled, by user or Squid
diff --git a/src/tests/stub_store_rebuild.cc b/src/tests/stub_store_rebuild.cc
index ba9ad4f6ef..22c567a0d9 100644
--- a/src/tests/stub_store_rebuild.cc
+++ b/src/tests/stub_store_rebuild.cc
@@ -53,9 +53,9 @@ storeRebuildLoadEntry(int, int, MemBuf&, _store_rebuild_data&)

 bool
 storeRebuildKeepEntry(const StoreEntry &tmpe, const cache_key *key,
-    struct _store_rebuild_data &counts)
+                      struct _store_rebuild_data &counts)
 {
-    return false;
+    return false;
 }

 bool
diff --git a/src/tests/testRock.cc b/src/tests/testRock.cc
index e69de29bb2..1ac2545de8 100644
--- a/src/tests/testRock.cc
+++ b/src/tests/testRock.cc
@@ -0,0 +1 @@
+#include "config.h"
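
The YesNoNone class whose braces were moved in the structs.h hunk above is a tri-state configuration value: unset until configure() is called, then explicitly enabled or disabled, with any read before configuration treated as a usage error. The sketch below captures the same idea; it is not the Squid class, and it uses plain assert() rather than Squid's own error-reporting helpers.

    #include <cassert>

    // Tri-state boolean option: unset / enabled / disabled.
    // Reading the value before it has been configured is a programming error.
    class TriStateOption
    {
    public:
        TriStateOption(): state(unset) {}

        // enable or disable the option, by user configuration or computed default
        void configure(bool beSet) { state = beSet ? enabled : disabled; }

        bool configured() const { return state != unset; }

        // whether the option is enabled; asserts if it was never configured
        bool enabledValue() const {
            assert(configured());
            return state == enabled;
        }

    private:
        enum State { unset, disabled, enabled } state;
    };

    // usage sketch: compute a default only if the admin did not set the option
    //   if (!opt.configured())
    //       opt.configure(defaultValue);
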