-
/*
- * DEBUG: section 20 Storage Manager
- * AUTHOR: Harvest Derived
- *
- * SQUID Web Proxy Cache http://www.squid-cache.org/
- * ----------------------------------------------------------
- *
- * Squid is the result of efforts by numerous individuals from
- * the Internet community; see the CONTRIBUTORS file for full
- * details. Many organizations have provided support for Squid's
- * development; see the SPONSORS file for full details. Squid is
- * Copyrighted (C) 2001 by the Regents of the University of
- * California; see the COPYRIGHT file for full details. Squid
- * incorporates software developed and/or copyrighted by other
- * sources; see the CREDITS file for full details.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
+ * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
*
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
*/
+/* DEBUG: section 20 Storage Manager */
+
#include "squid.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "comm/Connection.h"
+#include "comm/Read.h"
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
-#include "Stack.h"
#include "StatCounters.h"
#include "stmem.h"
+#include "Store.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
-#include "Store.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif
-#if HAVE_LIMITS_H
-#include <limits.h>
-#endif
+
+/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
+ * XXX: convert to MEMPROXY_CLASS() API
+ */
+#include "mem/Pool.h"
+
+#include <climits>
+#include <stack>
#define REBUILD_TIMESTAMP_DELTA_MAX 2
/*
* local variables
*/
-static Stack<StoreEntry*> LateReleaseStack;
+static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;
StorePointer Store::CurrentRoot = NULL;
void
Store::Stats(StoreEntry * output)
{
- assert (output);
+ assert(output);
Root().stat(*output);
}
{}
void
-Store::unlink (StoreEntry &anEntry)
+Store::unlink(StoreEntry &)
{
fatal("Store::unlink on invalid Store\n");
}
void *
StoreEntry::operator new (size_t bytecount)
{
- assert (bytecount == sizeof (StoreEntry));
+ assert(bytecount == sizeof (StoreEntry));
if (!pool) {
pool = memPoolCreate ("StoreEntry", bytecount);
{
/* This object can be cached for a long time */
- if (EBIT_TEST(flags, ENTRY_CACHABLE))
+ if (!EBIT_TEST(flags, RELEASE_REQUEST))
setPublicKey();
}
/* This object should never be cached at all */
expireNow();
releaseRequest(); /* delete object when not used */
- /* releaseRequest clears ENTRY_CACHABLE flag */
}
void
{
/* This object may be negatively cached */
negativeCache();
-
- if (EBIT_TEST(flags, ENTRY_CACHABLE))
- setPublicKey();
+ makePublic();
}
size_t
}
bool
-StoreEntry::checkDeferRead(int fd) const
+StoreEntry::checkDeferRead(int) const
{
return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}
void
-StoreEntry::setNoDelay (bool const newValue)
+StoreEntry::setNoDelay(bool const newValue)
{
if (mem_obj)
mem_obj->setNoDelay(newValue);
}
+// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
+// open swapin file, aggressively trim memory, and ignore read-ahead gap.
+// It does not mean we will read from disk exclusively (or at all!).
+// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
+// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
* offset 0 in the memory object is the HTTP headers.
*/
- if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
- // clients of an object cached in shared memory are memory clients
- return STORE_MEM_CLIENT;
- }
-
assert(mem_obj);
if (mem_obj->inmem_lo)
if (swap_status == SWAPOUT_DONE) {
debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
if (mem_obj->endOffset() == mem_obj->object_sz) {
- /* hot object fully swapped in */
+ /* hot object fully swapped in (XXX: or swapped out?) */
return STORE_MEM_CLIENT;
}
} else {
}
StoreEntry::StoreEntry() :
- mem_obj(NULL),
- timestamp(-1),
- lastref(-1),
- expires(-1),
- lastmod(-1),
- swap_file_sz(0),
- refcount(0),
- flags(0),
- swap_filen(-1),
- swap_dirn(-1),
- lock_count(0),
- mem_status(NOT_IN_MEMORY),
- ping_status(PING_NONE),
- store_status(STORE_PENDING),
- swap_status(SWAPOUT_NONE)
-{
- debugs(20, 3, HERE << "new StoreEntry " << this);
+ mem_obj(NULL),
+ timestamp(-1),
+ lastref(-1),
+ expires(-1),
+ lastmod(-1),
+ swap_file_sz(0),
+ refcount(0),
+ flags(0),
+ swap_filen(-1),
+ swap_dirn(-1),
+ mem_status(NOT_IN_MEMORY),
+ ping_status(PING_NONE),
+ store_status(STORE_PENDING),
+ swap_status(SWAPOUT_NONE),
+ lock_count(0)
+{
+ debugs(20, 5, "StoreEntry constructed, this=" << this);
}
StoreEntry::~StoreEntry()
{
- if (swap_filen >= 0) {
- SwapDir &sd = dynamic_cast<SwapDir&>(*store());
- sd.disconnect(*this);
- }
+ debugs(20, 5, "StoreEntry destructed, this=" << this);
}
#if USE_ADAPTATION
StoreEntry::destroyMemObject()
{
debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
- setMemStatus(NOT_IN_MEMORY);
- MemObject *mem = mem_obj;
- mem_obj = NULL;
- delete mem;
+
+ if (MemObject *mem = mem_obj) {
+ // Store::Root() is FATALly missing during shutdown
+ if (mem->xitTable.index >= 0 && !shutting_down)
+ Store::Root().transientsDisconnect(*mem);
+ if (mem->memCache.index >= 0 && !shutting_down)
+ Store::Root().memoryDisconnect(*this);
+
+ setMemStatus(NOT_IN_MEMORY);
+ mem_obj = NULL;
+ delete mem;
+ }
}
void
if (e == NullStoreEntry::getInstance())
return;
+ // Store::Root() is FATALly missing during shutdown
+ if (e->swap_filen >= 0 && !shutting_down) {
+ SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
+ sd.disconnect(*e);
+ }
+
e->destroyMemObject();
e->hashDelete();
void
StoreEntry::hashDelete()
{
- hash_remove_link(store_table, this);
- storeKeyFree((const cache_key *)key);
- key = NULL;
+ if (key) { // some test cases do not create keys and do not hashInsert()
+ hash_remove_link(store_table, this);
+ storeKeyFree((const cache_key *)key);
+ key = NULL;
+ }
}
/* -------------------------------------------------------------------------- */
}
void
-StoreEntry::touch() {
+StoreEntry::touch()
+{
lastref = squid_curtime;
Store::Root().reference(*this);
}
debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
EBIT_SET(flags, RELEASE_REQUEST);
+
+ Store::Root().markForUnlink(*this);
}
void
if (EBIT_TEST(flags, RELEASE_REQUEST))
return;
- setReleaseFlag();
-
- /*
- * Clear cachable flag here because we might get called before
- * anyone else even looks at the cachability flag. Also, this
- * prevents httpMakePublic from really setting a public key.
- */
- EBIT_CLR(flags, ENTRY_CACHABLE);
+ setReleaseFlag(); // makes validToSend() false, preventing future hits
setPrivateKey();
}
{
debugs(20, 3, (context ? context : "somebody") <<
" unlocking key " << getMD5Text() << ' ' << *this);
+ assert(lock_count > 0);
--lock_count;
if (lock_count)
return; /* is already private */
if (key) {
+ setReleaseFlag(); // will markForUnlink(); all caches/workers will know
+
+ // TODO: move into SwapDir::markForUnlink() already called by Root()
if (swap_filen > -1)
storeDirSwapLog(this, SWAP_LOG_DEL);
* store clients won't be able to access object data which has
* been freed from memory.
*
- * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
- * be set, and StoreEntry::setPublicKey() should not be called.
+ * If RELEASE_REQUEST is set, setPublicKey() should not be called.
*/
#if MORE_DEBUG_OUTPUT
#if X_ACCELERATOR_VARY
vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
- if (vary.defined()) {
+ if (vary.size() > 0) {
/* Again, we own this structure layout */
rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
vary.clean();
}
#endif
- pe->replaceHttpReply(rep);
+ pe->replaceHttpReply(rep, false); // no write until key is public
pe->timestampsSet();
pe->makePublic();
+ pe->startWriting(); // after makePublic()
+
pe->complete();
- pe->unlock();
+ pe->unlock("StoreEntry::setPublicKey+Vary");
}
newkey = storeKeyPublicByRequest(mem_obj->request);
}
StoreEntry *
-storeCreateEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
+storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
{
StoreEntry *e = NULL;
debugs(20, 3, "storeCreateEntry: '" << url << "'");
e = new StoreEntry();
- e->lock_count = 1; /* Note lock here w/o calling storeLock() */
e->makeMemObject();
e->mem_obj->setUris(url, log_url, method);
- if (neighbors_do_private_keys || !flags.hierarchical)
- e->setPrivateKey();
- else
- e->setPublicKey();
-
if (flags.cachable) {
- EBIT_SET(e->flags, ENTRY_CACHABLE);
EBIT_CLR(e->flags, RELEASE_REQUEST);
} else {
- /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
e->releaseRequest();
}
e->store_status = STORE_PENDING;
- e->setMemStatus(NOT_IN_MEMORY);
e->refcount = 0;
e->lastref = squid_curtime;
e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
return e;
}
+StoreEntry *
+storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
+{
+ StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
+ e->lock("storeCreateEntry");
+
+ if (neighbors_do_private_keys || !flags.hierarchical)
+ e->setPrivateKey();
+ else
+ e->setPublicKey();
+
+ return e;
+}
+
/* Mark object as expired */
void
StoreEntry::expireNow()
int private_key;
int too_many_open_files;
int too_many_open_fds;
+ int missing_parts;
} no;
struct {
return 0;
if (STORE_OK == store_status)
- if (mem_obj->object_sz < 0 ||
+ if (mem_obj->object_sz >= 0 &&
mem_obj->object_sz < Config.Store.minObjectSize)
return 1;
if (getReply()->content_length > -1)
return 0;
}
-// TODO: remove checks already performed by swapoutPossible()
+bool
+StoreEntry::checkTooBig() const
+{
+ if (mem_obj->endOffset() > store_maxobjsize)
+ return true;
+
+ if (getReply()->content_length < 0)
+ return false;
+
+ return (getReply()->content_length > store_maxobjsize);
+}
+
// TODO: move "too many open..." checks outside -- we are called too early/late
-int
+bool
StoreEntry::checkCachable()
{
+ // XXX: This method is used for both memory and disk caches, but some
+ // checks are specific to disk caches. Move them to mayStartSwapOut().
+
+ // XXX: This method may be called several times, sometimes with different
+ // outcomes, making store_check_cachable_hist counters misleading.
+
+ // check this first to optimize handling of repeated calls for uncachables
+ if (EBIT_TEST(flags, RELEASE_REQUEST)) {
+ debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
+ ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
+ return 0; // avoid rerequesting release below
+ }
+
#if CACHE_ALL_METHODS
if (mem_obj->method != Http::METHOD_GET) {
if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
++store_check_cachable_hist.no.wrong_content_length;
- } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
- debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
- ++store_check_cachable_hist.no.not_entry_cachable;
} else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
++store_check_cachable_hist.no.negative_cached;
return 0; /* avoid release call below */
- } else if ((getReply()->content_length > 0 &&
- getReply()->content_length > store_maxobjsize) ||
- mem_obj->endOffset() > store_maxobjsize) {
+ } else if (!mem_obj || !getReply()) {
+ // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
+ // this segfault protection, but how can we get such a HIT?
+ debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
+ ++store_check_cachable_hist.no.missing_parts;
+ } else if (checkTooBig()) {
debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
++store_check_cachable_hist.no.too_big;
} else if (checkTooSmall()) {
}
releaseRequest();
- /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
return 0;
}
store_check_cachable_hist.no.wrong_content_length);
storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
store_check_cachable_hist.no.negative_cached);
+ storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
+ store_check_cachable_hist.no.missing_parts);
storeAppendPrintf(sentry, "no.too_big\t%d\n",
store_check_cachable_hist.no.too_big);
storeAppendPrintf(sentry, "no.too_small\t%d\n",
/*
* Someone wants to abort this transfer. Set the reason in the
- * request structure, call the server-side callback and mark the
+ * request structure, call the callback and mark the
* entry for releasing
*/
void
assert(mem_obj != NULL);
debugs(20, 6, "storeAbort: " << getMD5Text());
- lock(); /* lock while aborting */
+ lock("StoreEntry::abort"); /* lock while aborting */
negativeCache();
releaseRequest();
// abort swap out, invalidating what was created so far (release follows)
swapOutFileClose(StoreIOState::writerGone);
- unlock(); /* unlock */
+ unlock("StoreEntry::abort"); /* unlock */
}
/**
* it becomes active will self register
*/
void
-Store::Maintain(void *notused)
+Store::Maintain(void *)
{
Store::Root().maintain();
StoreEntry::release()
{
PROF_start(storeRelease);
- debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
+ debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
/* If, for any reason we can't discard this object because of an
* outstanding request, mark it for pending release */
setPrivateKey();
if (swap_filen > -1) {
- /*
- * Fake a call to StoreEntry->lock() When rebuilding is done,
- * we'll just call StoreEntry->unlock() on these.
- */
- ++lock_count;
+ // lock the entry until rebuilding is done
+ lock("storeLateRelease");
setReleaseFlag();
- LateReleaseStack.push_back(this);
+ LateReleaseStack.push(this);
} else {
destroyStoreEntry(static_cast<hash_link *>(this));
// "this" is no longer valid
}
static void
-storeLateRelease(void *unused)
+storeLateRelease(void *)
{
StoreEntry *e;
- int i;
static int n = 0;
if (StoreController::store_dirs_rebuilding) {
return;
}
- for (i = 0; i < 10; ++i) {
- e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
-
- if (e == NULL) {
- /* done! */
+    // TODO: this works but looks inelegant.
+ for (int i = 0; i < 10; ++i) {
+ if (LateReleaseStack.empty()) {
debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
return;
+ } else {
+ e = LateReleaseStack.top();
+ LateReleaseStack.pop();
}
- e->unlock();
+ e->unlock("storeLateRelease");
++n;
}
if (lock_count)
return 1;
- if (swap_status == SWAPOUT_WRITING)
- return 1;
-
- if (store_status == STORE_PENDING)
- return 1;
-
/*
- * SPECIAL, PUBLIC entries should be "locked"
+ * SPECIAL, PUBLIC entries should be "locked";
+ * XXX: Their owner should lock them then instead of relying on this hack.
*/
if (EBIT_TEST(flags, ENTRY_SPECIAL))
if (!EBIT_TEST(flags, KEY_PRIVATE))
storeRegisterWithCacheManager();
}
+/// computes maximum size of a cachable object
+/// larger objects are rejected by all (disk and memory) cache stores
+static int64_t
+storeCalcMaxObjSize()
+{
+ int64_t ms = 0; // nothing can be cached without at least one store consent
+
+ // global maximum is at least the disk store maximum
+ for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
+ assert (Config.cacheSwap.swapDirs[i].getRaw());
+ const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
+ if (ms < storeMax)
+ ms = storeMax;
+ }
+
+ // global maximum is at least the memory store maximum
+ // TODO: move this into a memory cache class when we have one
+ const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
+ if (ms < memMax)
+ ms = memMax;
+
+ return ms;
+}
+
void
storeConfigure(void)
{
store_swap_low = (long) (((float) Store::Root().maxSize() *
(float) Config.Swap.lowWaterMark) / (float) 100);
store_pages_max = Config.memMaxSize / sizeof(mem_node);
+
+ store_maxobjsize = storeCalcMaxObjSize();
}
bool
-StoreEntry::memoryCachable() const
+StoreEntry::memoryCachable()
{
+ if (!checkCachable())
+ return 0;
+
if (mem_obj == NULL)
return 0;
if (EBIT_TEST(flags, ENTRY_ABORTED))
return 0;
+ // now check that the entry has a cache backing or is collapsed
+ if (swap_filen > -1) // backed by a disk cache
+ return 1;
+
+ if (swappingOut()) // will be backed by a disk cache
+ return 1;
+
+ if (!mem_obj) // not backed by a memory cache and not collapsed
+ return 0;
+
+ // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
+ // disk cache backing that store_client constructor will assert. XXX: This
+ // is wrong for range requests (that could feed off nibbled memory) and for
+ // entries backed by the shared memory cache (that could, in theory, get
+ // nibbled bytes from that cache, but there is no such "memoryIn" code).
+ if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
+ return 0;
+
+ // The following check is correct but useless at this position. TODO: Move
+ // it up when the shared memory cache can either replenish locally nibbled
+ // bytes or, better, does not use local RAM copy at all.
+ // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
+ // return 1;
+
return 1;
}
const char *
StoreEntry::url() const
{
- if (this == NULL)
- return "[null_entry]";
- else if (mem_obj == NULL)
+ if (mem_obj == NULL)
return "[null_mem_obj]";
else
return mem_obj->storeId();
#endif
+void
+StoreEntry::storeErrorResponse(HttpReply *reply)
+{
+ lock("StoreEntry::storeErrorResponse");
+ buffer();
+ replaceHttpReply(reply);
+ flush();
+ complete();
+ negativeCache();
+ releaseRequest();
+ unlock("StoreEntry::storeErrorResponse");
+}
+
/*
* Replace a store entry with
* a new reply. This eats the reply.
return result;
}
+/**
+ * Abandon the transient entry our worker has created if neither the shared
+ * memory cache nor the disk cache wants to store it. Collapsed requests, if
+ * any, should notice and use Plan B instead of getting stuck waiting for us
+ * to start swapping the entry out.
+ */
+void
+StoreEntry::transientsAbandonmentCheck()
+{
+ if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
+ mem_obj->xitTable.index >= 0 && // other workers may be interested
+ mem_obj->memCache.index < 0 && // rejected by the shared memory cache
+ mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
+ debugs(20, 7, "cannot be shared: " << *this);
+ if (!shutting_down) // Store::Root() is FATALly missing during shutdown
+ Store::Root().transientsAbandon(*this);
+ }
+}
+
+void
+StoreEntry::memOutDecision(const bool)
+{
+ transientsAbandonmentCheck();
+}
+
+void
+StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
+{
+ // Abandon our transient entry if neither shared memory nor disk wants it.
+ assert(mem_obj);
+ mem_obj->swapout.decision = decision;
+ transientsAbandonmentCheck();
+}
+
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
}
}
+bool
+StoreEntry::hasEtag(ETag &etag) const
+{
+ if (const HttpReply *reply = getReply()) {
+ etag = reply->header.getETag(HDR_ETAG);
+ if (etag.str)
+ return true;
+ }
+ return false;
+}
+
bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
// print only non-default status values, using unique letters
if (e.mem_status != NOT_IN_MEMORY ||
- e.store_status != STORE_PENDING ||
- e.swap_status != SWAPOUT_NONE ||
- e.ping_status != PING_NONE) {
+ e.store_status != STORE_PENDING ||
+ e.swap_status != SWAPOUT_NONE ||
+ e.ping_status != PING_NONE) {
if (e.mem_status != NOT_IN_MEMORY) os << 'm';
if (e.store_status != STORE_PENDING) os << 's';
if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
- if (EBIT_TEST(e.flags, ENTRY_CACHABLE)) os << 'C';
if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
if (e.mem_obj && e.mem_obj->smpCollapsed)
os << 'O';
- return os << '/' << &e << '*' << e.lock_count;
+ return os << '/' << &e << '*' << e.locks();
}
/* NullStoreEntry */
return NULL;
}
-#if !_USE_INLINE_
-#include "Store.cci"
-#endif