From: Alex Rousskov
Date: Tue, 30 Aug 2011 15:04:30 +0000 (-0600)
Subject: Merged from parent (trunk r11691, v3.2.0.11+).
X-Git-Tag: take08~35
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=899249853a35bcad559ce23daa292a2c7a48c7a4;p=thirdparty%2Fsquid.git

Merged from parent (trunk r11691, v3.2.0.11+).
---

899249853a35bcad559ce23daa292a2c7a48c7a4
diff --cc src/ipc/StoreMap.h
index cbd9fef306,0000000000..ce17077eea
mode 100644,000000..100644
--- a/src/ipc/StoreMap.h
+++ b/src/ipc/StoreMap.h
@@@ -1,198 -1,0 +1,198 @@@
+#ifndef SQUID_IPC_STORE_MAP_H
+#define SQUID_IPC_STORE_MAP_H
+
+#include "ipc/ReadWriteLock.h"
+#include "ipc/mem/Pointer.h"
+#include "typedefs.h"
+
+namespace Ipc {
+
+/// a StoreMap element, holding basic shareable StoreEntry info
+class StoreMapSlot {
+public:
+    StoreMapSlot();
+
+    /// store StoreEntry key and basics
+    void set(const StoreEntry &anEntry);
+
+    void setKey(const cache_key *const aKey);
+    bool sameKey(const cache_key *const aKey) const;
+
+public:
+    mutable ReadWriteLock lock; ///< protects slot data below
+    AtomicWordT<uint8_t> waitingToBeFreed; ///< may be accessed w/o a lock
+
+    uint64_t key[2]; ///< StoreEntry key
+
+    // STORE_META_STD TLV field from StoreEntry
+    struct Basics {
+        time_t timestamp;
+        time_t lastref;
+        time_t expires;
+        time_t lastmod;
+        uint64_t swap_file_sz;
-         u_short refcount;
-         u_short flags;
++        uint16_t refcount;
++        uint16_t flags;
+    } basics;
+
+    /// possible persistent states
+    typedef enum {
+        Empty, ///< ready for writing, with nothing of value
+        Writeable, ///< transitions from Empty to Readable
+        Readable, ///< ready for reading
+    } State;
+    State state; ///< current state
+};
+
+class StoreMapCleaner;
+
+/// map of StoreMapSlots indexed by their keys, with read/write slot locking
+/// kids extend to store custom data
+class StoreMap
+{
+public:
+    typedef StoreMapSlot Slot;
+
+private:
+    struct Shared
+    {
+        Shared(const int aLimit, const size_t anExtrasSize);
+        size_t sharedMemorySize() const;
+        static size_t SharedMemorySize(const int limit, const size_t anExtrasSize);
+
+        const int limit; ///< maximum number of map slots
+        const size_t extrasSize; ///< size of slot extra data
+        AtomicWord count; ///< current number of map slots
+        Slot slots[]; ///< slots storage
+    };
+
+public:
+    typedef Mem::Owner<Shared> Owner;
+
+    /// initialize shared memory
+    static Owner *Init(const char *const path, const int limit);
+
+    StoreMap(const char *const aPath);
+
+    /// finds and reserves space for writing a new entry, or returns nil
+    Slot *openForWriting(const cache_key *const key, sfileno &fileno);
+    /// successfully finish writing the entry
+    void closeForWriting(const sfileno fileno, bool lockForReading = false);
+
+    /// only works on locked entries; returns nil unless the slot is readable
+    const Slot *peekAtReader(const sfileno fileno) const;
+
+    /// mark the slot as waiting to be freed and, if possible, free it
+    void free(const sfileno fileno);
+
+    /// open slot for reading, increments read level
+    const Slot *openForReading(const cache_key *const key, sfileno &fileno);
+    /// open slot for reading, increments read level
+    const Slot *openForReadingAt(const sfileno fileno);
+    /// close slot after reading, decrements read level
+    void closeForReading(const sfileno fileno);
+
+    /// called by lock holder to terminate either slot writing or reading
+    void abortIo(const sfileno fileno);
+
+    bool full() const; ///< there are no empty slots left
+    bool valid(const int n) const; ///< whether n is a valid slot coordinate
+    int entryCount() const; ///< number of used slots
+    int entryLimit() const; ///< maximum number of slots that can be used
+
+    /// adds approximate current stats to the supplied ones
+    void updateStats(ReadWriteLockStats &stats) const;
+
+    StoreMapCleaner *cleaner; ///< notified before a readable entry is freed
+
+protected:
+    static Owner *Init(const char *const path, const int limit, const size_t extrasSize);
+
+    const String path; ///< cache_dir path, used for logging
+    Mem::Pointer<Shared> shared;
+
+private:
+    int slotIndexByKey(const cache_key *const key) const;
+    Slot &slotByKey(const cache_key *const key);
+
+    Slot *openForReading(Slot &s);
+    void abortWriting(const sfileno fileno);
+    void freeIfNeeded(Slot &s);
+    void freeLocked(Slot &s, bool keepLocked);
+};
+
+/// StoreMap with extra slot data
+/// Note: ExtrasT must be POD; it is initialized with zeroes and no
+/// constructors or destructors are called
+template <class ExtrasT>
+class StoreMapWithExtras: public StoreMap
+{
+public:
+    typedef ExtrasT Extras;
+
+    /// initialize shared memory
+    static Owner *Init(const char *const path, const int limit);
+
+    StoreMapWithExtras(const char *const path);
+
+    /// write access to the extras; call openForWriting() first!
+    ExtrasT &extras(const sfileno fileno);
+    /// read-only access to the extras; call openForReading() first!
+    const ExtrasT &extras(const sfileno fileno) const;
+
+protected:
+
+    ExtrasT *sharedExtras; ///< pointer to extras in shared memory
+};
+
+/// API for adjusting external state when a dirty map slot is being freed
+class StoreMapCleaner
+{
+public:
+    virtual ~StoreMapCleaner() {}
+
+    /// adjust slot-linked state before a locked Readable slot is erased
+    virtual void cleanReadable(const sfileno fileno) = 0;
+};
+
+// StoreMapWithExtras implementation
+
+template <class ExtrasT>
+StoreMap::Owner *
+StoreMapWithExtras<ExtrasT>::Init(const char *const path, const int limit)
+{
+    return StoreMap::Init(path, limit, sizeof(Extras));
+}
+
+template <class ExtrasT>
+StoreMapWithExtras<ExtrasT>::StoreMapWithExtras(const char *const path):
+        StoreMap(path)
+{
+    const size_t sharedSizeWithoutExtras =
+        Shared::SharedMemorySize(entryLimit(), 0);
+    sharedExtras = reinterpret_cast<Extras *>(reinterpret_cast<char *>(shared.getRaw()) + sharedSizeWithoutExtras);
+}
+
+template <class ExtrasT>
+ExtrasT &
+StoreMapWithExtras<ExtrasT>::extras(const sfileno fileno)
+{
+    return const_cast<ExtrasT &>(const_cast<const StoreMapWithExtras *>(this)->extras(fileno));
+}
+
+template <class ExtrasT>
+const ExtrasT &
+StoreMapWithExtras<ExtrasT>::extras(const sfileno fileno) const
+{
+    assert(sharedExtras);
+    assert(valid(fileno));
+    return sharedExtras[fileno];
+}
+
+
+} // namespace Ipc
+
+// We do not reuse struct _fileMap because we cannot control its size,
+// resulting in sfilenos that are pointing beyond the database.
+
+#endif /* SQUID_IPC_STORE_MAP_H */
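
For orientation, the sketch below shows how a cache_dir implementation might drive the StoreMapWithExtras API declared above. It is illustrative only: DirMapExtras, the "cache_dir_map" segment name, and the two storeExample*() helpers are invented for this example and are not part of the commit, and the Mem::Owner/Mem::Pointer segment setup performed by the real code is reduced to comments.

    // Hypothetical usage sketch; DirMapExtras and the helpers below are
    // invented for illustration and do not appear in this commit.
    #include "ipc/StoreMap.h"

    struct DirMapExtras {
        uint64_t firstDbSlot; ///< example POD payload kept next to each slot
    };
    typedef Ipc::StoreMapWithExtras<DirMapExtras> DirMap;

    // Master process: create and size the shared segment for `limit` slots.
    //     DirMap::Owner *owner = DirMap::Init("cache_dir_map", limit);
    // Kid process: attach to the already-created segment.
    //     DirMap map("cache_dir_map");

    static void
    storeExampleSwapOut(DirMap &map, const cache_key *key, const StoreEntry &e)
    {
        sfileno fileno = -1;
        if (Ipc::StoreMap::Slot *slot = map.openForWriting(key, fileno)) {
            slot->set(e); // copies the key and the STORE_META_STD basics
            map.extras(fileno).firstDbSlot = 0; // extras writable while locked
            map.closeForWriting(fileno); // the slot becomes Readable
        } // else: the map is full or the slot is busy; the caller must cope
    }

    static bool
    storeExampleHit(DirMap &map, const cache_key *key)
    {
        sfileno fileno = -1;
        if (const Ipc::StoreMap::Slot *slot = map.openForReading(key, fileno)) {
            const bool fresh = slot->basics.expires > squid_curtime;
            map.closeForReading(fileno); // drop the read lock
            return fresh;
        }
        return false; // miss: no readable slot with this key
    }
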
diff --cc src/store_rebuild.cc
index 0967664491,716e242b07..a8d0d2f992
--- a/src/store_rebuild.cc
+++ b/src/store_rebuild.cc
@@@ -229,187 -231,3 +229,187 @@@ storeRebuildProgress(int sd_index, int n, int d)
      debugs(20, 1, "Store rebuilding is "<< std::setw(4)<< std::setprecision(2) << 100.0 * n / d << "% complete");
      last_report = squid_curtime;
  }
+
+#include "fde.h"
+#include "StoreMetaUnpacker.h"
+#include "StoreMeta.h"
+#include "Generic.h"
+
+struct InitStoreEntry : public unary_function<StoreMeta, void> {
+    InitStoreEntry(StoreEntry *anEntry, cache_key *aKey):what(anEntry),index(aKey) {}
+
+    void operator()(StoreMeta const &x) {
+        switch (x.getType()) {
+
+        case STORE_META_KEY:
+            assert(x.length == SQUID_MD5_DIGEST_LENGTH);
+            memcpy(index, x.value, SQUID_MD5_DIGEST_LENGTH);
+            break;
+
+        case STORE_META_STD:
+            struct old_metahdr {
+                time_t timestamp;
+                time_t lastref;
+                time_t expires;
+                time_t lastmod;
+                size_t swap_file_sz;
-                 u_short refcount;
-                 u_short flags;
++                uint16_t refcount;
++                uint16_t flags;
+            } *tmp;
+            tmp = (struct old_metahdr *)x.value;
+            assert(x.length == STORE_HDR_METASIZE_OLD);
+            what->timestamp = tmp->timestamp;
+            what->lastref = tmp->lastref;
+            what->expires = tmp->expires;
+            what->lastmod = tmp->lastmod;
+            what->swap_file_sz = tmp->swap_file_sz;
+            what->refcount = tmp->refcount;
+            what->flags = tmp->flags;
+            break;
+
+        case STORE_META_STD_LFS:
+            assert(x.length == STORE_HDR_METASIZE);
+            memcpy(&what->timestamp, x.value, STORE_HDR_METASIZE);
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    StoreEntry *what;
+    cache_key *index;
+};
+
+bool
+storeRebuildLoadEntry(int fd, int diskIndex, MemBuf &buf,
+                      struct _store_rebuild_data &counts)
+{
+    if (fd < 0)
+        return false;
+
+    assert(buf.hasSpace()); // caller must allocate
+
+    const int len = FD_READ_METHOD(fd, buf.space(), buf.spaceSize());
+    statCounter.syscalls.disk.reads++;
+    if (len < 0) {
+        const int xerrno = errno;
+        debugs(47, 1, "cache_dir[" << diskIndex << "]: " <<
+               "failed to read swap entry meta data: " << xstrerr(xerrno));
+        return false;
+    }
+
+    buf.appended(len);
+    return true;
+}
+
+bool
+storeRebuildParseEntry(MemBuf &buf, StoreEntry &tmpe, cache_key *key,
+                       struct _store_rebuild_data &counts,
+                       uint64_t expectedSize)
+{
+    int swap_hdr_len = 0;
+    StoreMetaUnpacker aBuilder(buf.content(), buf.contentSize(), &swap_hdr_len);
+    if (aBuilder.isBufferZero()) {
+        debugs(47,5, HERE << "skipping empty record.");
+        return false;
+    }
+
+    if (!aBuilder.isBufferSane()) {
+        debugs(47,1, "Warning: Ignoring malformed cache entry.");
+        return false;
+    }
+
+    StoreMeta *tlv_list = aBuilder.createStoreMeta();
+    if (!tlv_list) {
+        debugs(47, 1, HERE << "failed to get swap entry meta data list");
+        return false;
+    }
+
+    // TODO: consume parsed metadata?
+
+    debugs(47,7, HERE << "successful swap meta unpacking");
+    memset(key, '\0', SQUID_MD5_DIGEST_LENGTH);
+
+    InitStoreEntry visitor(&tmpe, key);
+    for_each(*tlv_list, visitor);
+    storeSwapTLVFree(tlv_list);
+    tlv_list = NULL;
+
+    if (storeKeyNull(key)) {
+        debugs(47,1, HERE << "NULL swap entry key");
+        return false;
+    }
+
+    tmpe.key = key;
+    /* check sizes */
+
+    if (expectedSize > 0) {
+        if (tmpe.swap_file_sz == 0) {
+            tmpe.swap_file_sz = expectedSize;
+        } else if (tmpe.swap_file_sz == (uint64_t)(expectedSize - swap_hdr_len)) {
+            tmpe.swap_file_sz = expectedSize;
+        } else if (tmpe.swap_file_sz != expectedSize) {
+            debugs(47, 1, HERE << "swap entry SIZE MISMATCH " <<
+                   tmpe.swap_file_sz << "!=" << expectedSize);
+            return false;
+        }
+    } else if (tmpe.swap_file_sz <= 0) {
+        debugs(47, 1, HERE << "missing swap entry size: " << tmpe);
+        return false;
+    }
+
+    if (EBIT_TEST(tmpe.flags, KEY_PRIVATE)) {
+        counts.badflags++;
+        return false;
+    }
+
+    return true;
+}
+
+bool
+storeRebuildKeepEntry(const StoreEntry &tmpe, const cache_key *key,
+                      struct _store_rebuild_data &counts)
+{
+    /* this needs to become
+     * 1) unpack url
+     * 2) make synthetic request with headers ?? or otherwise search
+     * for a matching object in the store
+     * TODO FIXME change to new async api
+     * TODO FIXME I think there is a race condition here with the
+     * async api:
+     * store A reads in object foo, searches for it, and finds nothing.
+     * store B reads in object foo, searches for it, finds nothing.
+     * store A gets called back with nothing, so registers the object.
+     * store B gets called back with nothing, so registers the object,
+     * which will conflict when the in-core index gets around to scanning
+     * store B.
+     *
+     * this suggests that rather than searching for duplicates, the
+     * index rebuild should just assume it is the most recent accurate
+     * store entry and whoever indexes the stores handles duplicates.
+     */
+    if (StoreEntry *e = Store::Root().get(key)) {
+
+        if (e->lastref >= tmpe.lastref) {
+            /* key already exists, old entry is newer */
+            /* keep old, ignore new */
+            counts.dupcount++;
+
+            // For some stores, get() creates/unpacks a store entry. Signal
+            // such stores that we will no longer use the get() result:
+            e->lock();
+            e->unlock();
+
+            return false;
+        } else {
+            /* URL already exists, this swapfile not being used */
+            /* junk old, load new */
+            e->release(); /* release old entry */
+            counts.dupcount++;
+        }
+    }
+
+    return true;
+}
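
The three helpers added to store_rebuild.cc are designed to be called in sequence by a cache_dir rebuild loop: load the raw swap metadata, parse and validate it, then resolve duplicates against entries already indexed elsewhere. A condensed sketch of that sequence follows; rebuildOneSlot(), the caller-supplied fd, and indexEntry() are placeholders invented for illustration and are not part of this commit.

    // Hypothetical rebuild-loop fragment; only the three storeRebuild*()
    // calls come from this commit, the rest is assumed store-specific glue.

    // assumed store-specific indexing step (not part of this commit)
    static void indexEntry(int diskIndex, sfileno fileno,
                           const StoreEntry &e, const cache_key *key);

    static void
    rebuildOneSlot(const int fd, const int diskIndex, const sfileno fileno,
                   struct _store_rebuild_data &counts)
    {
        MemBuf buf;
        buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE); // room for the swap metadata prefix

        StoreEntry tmpe;
        cache_key key[SQUID_MD5_DIGEST_LENGTH];

        // 1. read the raw swap metadata for this slot into buf
        if (!storeRebuildLoadEntry(fd, diskIndex, buf, counts))
            return;

        // 2. unpack the TLV metadata into tmpe/key, validating sizes and flags;
        //    expectedSize of 0 means "unknown", so only sanity checks apply
        if (!storeRebuildParseEntry(buf, tmpe, key, counts, 0))
            return;

        // 3. drop the entry if a fresher copy is already indexed elsewhere
        if (!storeRebuildKeepEntry(tmpe, key, counts))
            return;

        indexEntry(diskIndex, fileno, tmpe, key);
        ++counts.objcount;
    }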