*/
#include "squid.h"
+#include "CollapsedForwarding.h"
+#include "globals.h"
#include "ipc/mem/Segment.h"
#include "ipc/Messages.h"
#include "ipc/Port.h"
#include "ipc/TypedMsgHdr.h"
#include "MemObject.h"
-#include "CollapsedForwarding.h"
-#include "globals.h"
#include "SquidConfig.h"
#include "Store.h"
#include "store_key_md5.h"
// TODO: Count and report the total number of notifications, pops, pushes.
debugs(17, 7, "to kid" << workerId);
Ipc::TypedMsgHdr msg;
- // TODO: add proper message type?
msg.setType(Ipc::mtCollapsedForwardingNotification);
msg.putInt(KidIdentifier);
const String addr = Ipc::Port::MakeAddr(Ipc::strandAddrPfx, workerId);
*/
#include "squid.h"
-#include "CollapsedForwarding.h"
#include "base/RunnersRegistry.h"
+#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
-#include "Transients.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
+#include "Transients.h"
#if HAVE_LIMITS_H
#include <limits>
#endif
-
/// shared memory segment path to use for Transients maps
static const char *MapLabel = "transients_map";
-
Transients::Transients(): map(NULL), locals(NULL)
{
}
void
Transients::maintain()
{
+ // no lazy garbage collection needed
}
uint64_t
void
Transients::reference(StoreEntry &)
{
+ // no replacement policy (but the cache(s) storing the entry may have one)
}
bool
map->abortWriting(index);
}
-
/// copies all relevant local data to shared memory
bool
Transients::copyToShm(const StoreEntry &e, const sfileno index,
RunnerRegistrationEntry(rrAfterConfig, TransientsRr);
-void TransientsRr::run(const RunnerRegistry &r)
+void
+TransientsRr::run(const RunnerRegistry &r)
{
assert(Config.memShared.configured());
Ipc::Mem::RegisteredRunner::run(r);
}
-void TransientsRr::create(const RunnerRegistry &)
+void
+TransientsRr::create(const RunnerRegistry &)
{
if (!Config.onoff.collapsed_forwarding)
return;
};
typedef Ipc::StoreMapWithExtras<TransientsMapExtras> TransientsMap;
-/// Keeps track of hits being delivered to clients that arrived before those
-/// hits were [fully] cached. This shared table is necessary to synchronize hit
-/// caching (writing) workers with other workers serving (reading) those hits.
+/// Keeps track of store entries being delivered to clients that arrived before
+/// those entries were [fully] cached. This shared table is necessary to sync
+/// the entry-writing worker with entry-reading worker(s).
class Transients: public Store, public Ipc::StoreMapCleaner
{
public:
// TODO: Why use Store as a base? We are not really a cache.
-#endif /* SQUID_MEMSTORE_H */
+#endif /* SQUID_TRANSIENTS_H */
if (NULL == http->storeEntry()) {
/** \li If no StoreEntry object is current assume this object isn't in the cache set MISS*/
- debugs(85, 3, "clientProcessRequest2: StoreEntry is NULL - MISS");
+ debugs(85, 3, "StoreEntry is NULL - MISS");
http->logType = LOG_TCP_MISS;
doGetMoreData();
return;
if (Config.onoff.offline) {
/** \li If we are running in offline mode set to HIT */
- debugs(85, 3, "clientProcessRequest2: offline HIT " << *e);
+ debugs(85, 3, "offline HIT " << *e);
http->logType = LOG_TCP_HIT;
doGetMoreData();
return;
}
if (!e->validToSend()) {
- debugs(85, 3, "clientProcessRequest2: !storeEntryValidToSend MISS " << *e);
+ debugs(85, 3, "!storeEntryValidToSend MISS " << *e);
forgetHit();
http->logType = LOG_TCP_MISS;
doGetMoreData();
if (EBIT_TEST(e->flags, ENTRY_SPECIAL)) {
/* \li Special entries are always hits, no matter what the client says */
- debugs(85, 3, "clientProcessRequest2: ENTRY_SPECIAL HIT " << *e);
+ debugs(85, 3, "ENTRY_SPECIAL HIT " << *e);
http->logType = LOG_TCP_HIT;
doGetMoreData();
return;
}
if (r->flags.noCache) {
- debugs(85, 3, "clientProcessRequest2: no-cache REFRESH MISS " << *e);
+ debugs(85, 3, "no-cache REFRESH MISS " << *e);
forgetHit();
http->logType = LOG_TCP_CLIENT_REFRESH_MISS;
doGetMoreData();
return;
}
- debugs(85, 3, "clientProcessRequest2: default HIT " << *e);
+ debugs(85, 3, "default HIT " << *e);
http->logType = LOG_TCP_HIT;
doGetMoreData();
}
StoreEntry *e = storeCreateEntry(storeId(), http->log_uri, reqFlags, m);
- // Make entry collapsable ASAP, to increase collapsing chances for others.
- // TODO: why is !.needValidation required here?
+ // Make entry collapsable ASAP, to increase collapsing chances for others.
+ // TODO: every must-revalidate and similar request MUST reach the origin,
+ // but do we have to prohibit others from collapsing on that request?
if (Config.onoff.collapsed_forwarding && reqFlags.cachable &&
!reqFlags.needValidation &&
(m == Http::METHOD_GET || m == Http::METHOD_HEAD)) {
}
-
namespace Rock
{
}
-
#endif /* SQUID_FS_ROCK_FORWARD_H */
return success;
}
-/** We only write data when full slot is accumulated or when close() is called.
- We buffer, in part, to avoid forcing OS to _read_ old unwritten portions of
- the slot when the write does not end at the page or sector boundary. */
+/**
+ * Possibly send data to be written to disk:
+ * We only write data when full slot is accumulated or when close() is called.
+ * We buffer, in part, to avoid forcing OS to _read_ old unwritten portions of
+ * the slot when the write does not end at the page or sector boundary.
+ */
void
Rock::IoState::tryWrite(char const *buf, size_t size, off_t coreOff)
{
// either this is the first write or append; we do not support write gaps
assert(!coreOff || coreOff == -1);
- // allocate the first slice diring the first write
+ // allocate the first slice during the first write
if (!coreOff) {
assert(sidCurrent < 0);
sidCurrent = reserveSlotForWriting(); // throws on failures
}
/// Buffers incoming data for the current slot.
-/// Returns the number of bytes buffered.
+/// \return the number of bytes buffered
size_t
Rock::IoState::writeToBuffer(char const *buf, size_t size)
{
CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);
+/**
+ \defgroup RockFsRebuild Rock Store Rebuild
+ \ingroup Filesystems
+ *
+ \section Overview Overview
+ * Several layers of information are manipulated during the rebuild:
+ \par
+ * Store Entry: Response message plus all the metainformation associated with
+ * it. Identified by store key. At any given time, from Squid point
+ * of view, there is only one entry with a given key, but several
+ * different entries with the same key can be observed in any historical
+ * archive (such as an access log or a store database).
+ \par
+ * Slot chain: A sequence of db slots representing a Store Entry state at
+ * some point in time. Identified by key+version combination. Due to
+ * transaction aborts, crashes, and idle periods, some chains may contain
+ * incomplete or stale information. We assume that no two different chains
+ * have the same key and version. If that assumption fails, we may serve a
+ * hodgepodge entry during rebuild, until "extra" slots are loaded/noticed.
+ \par
+ * Db slot: A db record containing a piece of a single store entry and linked
+ * to other slots with the same key and version fields, forming a chain.
+ * Slots are identified by their absolute position in the database file,
+ * which is naturally unique.
+ \par
+ * Except for the "mapped", "freed", and "more" fields, LoadingEntry info is
+ * entry-level and is stored at fileno position. In other words, the array of
+ * LoadingEntries should be interpreted as two arrays, one that maps slot ID
+ * to the LoadingEntry::mapped/freed/more members, and the second one that maps
+ * fileno to all other LoadingEntry members. StoreMap maps slot key to fileno.
+ \par
+ * When information from the newly loaded db slot contradicts the entry-level
+ * information collected so far (e.g., the versions do not match or the total
+ * chain size after the slot contribution exceeds the expected number), the
+ * whole entry (and not just the chain or the slot!) is declared corrupted.
+ \par
+ * Why invalidate the whole entry? Rock Store is written for high-load
+ * environments with large caches, where there are usually very few idle slots
+ * in the database. A space occupied by a purged entry is usually immediately
+ * reclaimed. A Squid crash or a transaction abort is rather unlikely to
+ * leave a relatively large number of stale slots in the database. Thus, the
+ * number of potentially corrupted entries is relatively small. On the other
+ * hand, the damage from serving a single hodgepodge entry may be significant
+ * to the user. In such an environment, invalidating the whole entry has
+ * negligible performance impact but saves us from high-damage bugs.
+ */
+
+
namespace Rock {
/// maintains information about the store entry being loaded from disk
} /* namespace Rock */
-/**
- Several layers of information are manipualted during the rebuild:
-
- Store Entry: Response message plus all the metainformation associated with
- it. Identified by store key. At any given time, from Squid point
- of view, there is only one entry with a given key, but several
- different entries with the same key can be observed in any historical
- archive (such as an access log or a store database).
-
- Slot chain: A sequence of db slots representing a Store Entry state at
- some point in time. Identified by key+version combination. Due to
- transaction aborts, crashes, and idle periods, some chains may contain
- incomplete or stale information. We assume that no two different chains
- have the same key and version. If that assumption fails, we may serve a
- hodgepodge entry during rebuild, until "extra" slots are loaded/noticed.
-
- Db slot: A db record containing a piece of a single store entry and linked
- to other slots with the same key and version fields, forming a chain.
- Slots are identified by their absolute position in the database file,
- which is naturally unique.
-
-
- Except for the "mapped", "freed", and "more" fields, LoadingEntry info is
- entry-level and is stored at fileno position. In other words, the array of
- LoadingEntries should be interpreted as two arrays, one that maps slot ID
- to the LoadingEntry::mapped/free/more members, and the second one that maps
- fileno to all other LoadingEntry members. StoreMap maps slot key to fileno.
-
-
- When information from the newly loaded db slot contradicts the entry-level
- information collected so far (e.g., the versions do not match or the total
- chain size after the slot contribution exceeds the expected number), the
- whole entry (and not just the chain or the slot!) is declared corrupted.
-
- Why invalidate the whole entry? Rock Store is written for high-load
- environments with large caches, where there is usually very few idle slots
- in the database. A space occupied by a purged entry is usually immediately
- reclaimed. A Squid crash or a transaction abort is rather unlikely to
- leave a relatively large number of stale slots in the database. Thus, the
- number of potentially corrupted entries is relatively small. On the other
- hand, the damage from serving a single hadgepodge entry may be significant
- to the user. In such an environment, invalidating the whole entry has
- negligible performance impact but saves us from high-damage bugs.
-*/
-
Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
sd(dir),
freeSlotIfIdle(slotId, true);
return;
}
-
memcpy(&header, buf.content(), sizeof(header));
if (header.empty()) {
freeSlotIfIdle(slotId, false);
bool canAdd(const sfileno fileno, const SlotId slotId, const DbCellHeader &header) const;
bool sameEntry(const sfileno fileno, const DbCellHeader &header) const;
-
SwapDir *sd;
LoadingEntry *entries; ///< store entries being loaded from disk
e.swap_filen = filen;
}
-
void Rock::SwapDir::disconnect(StoreEntry &e)
{
assert(e.swap_dirn == index);
debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
" cache_dirs have " << storeKeyText(key));
-
return NULL;
}