]>
git.ipfire.org Git - thirdparty/squid.git/blob - src/fs/rock/RockRebuild.cc
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 79 Disk IO Routines */
12 #include "base/AsyncJobCalls.h"
13 #include "fs/rock/RockDbCell.h"
14 #include "fs/rock/RockRebuild.h"
15 #include "fs/rock/RockSwapDir.h"
18 #include "ipc/StoreMap.h"
20 #include "SquidTime.h"
22 #include "store_rebuild.h"
27 CBDATA_NAMESPACED_CLASS_INIT(Rock
, Rebuild
);
30 \defgroup RockFsRebuild Rock Store Rebuild
33 \section Overview Overview
34 * Several layers of information are manipulated during the rebuild:
36 * Store Entry: Response message plus all the metainformation associated with
37 * it. Identified by store key. At any given time, from Squid point
38 * of view, there is only one entry with a given key, but several
39 * different entries with the same key can be observed in any historical
40 * archive (such as an access log or a store database).
42 * Slot chain: A sequence of db slots representing a Store Entry state at
43 * some point in time. Identified by key+version combination. Due to
44 * transaction aborts, crashes, and idle periods, some chains may contain
45 * incomplete or stale information. We assume that no two different chains
46 * have the same key and version. If that assumption fails, we may serve a
47 * hodgepodge entry during rebuild, until "extra" slots are loaded/noticed.
49 * iNode: The very first db slot in an entry slot chain. This slot contains
50 * at least the beginning of Store Entry metadata, but most 32KB inodes contain
51 * the entire metadata, HTTP headers, and HTTP body.
53 * Db slot: A db record containing a piece of a single store entry and linked
54 * to other slots with the same key and version fields, forming a chain.
55 * Slots are identified by their absolute position in the database file,
56 * which is naturally unique.
58 * When information from the newly loaded db slot contradicts the entry-level
59 * information collected so far (e.g., the versions do not match or the total
60 * chain size after the slot contribution exceeds the expected number), the
61 * whole entry (and not just the chain or the slot!) is declared corrupted.
63 * Why invalidate the whole entry? Rock Store is written for high-load
64 * environments with large caches, where there are usually very few idle slots
65 * in the database. A space occupied by a purged entry is usually immediately
66 * reclaimed. A Squid crash or a transaction abort is rather unlikely to
67 * leave a relatively large number of stale slots in the database. Thus, the
68 * number of potentially corrupted entries is relatively small. On the other
69 * hand, the damage from serving a single hodgepodge entry may be significant
70 * to the user. In such an environment, invalidating the whole entry has
71 * negligible performance impact but saves us from high-damage bugs.
77 /// low-level anti-padding storage class for LoadingEntry and LoadingSlot flags
81 LoadingFlags(): state(0), anchored(0), mapped(0), finalized(0), freed(0) {}
83 /* for LoadingEntry */
84 uint8_t state
:3; ///< current entry state (one of the LoadingEntry::State values)
85 uint8_t anchored
:1; ///< whether we loaded the inode slot for this entry
88 uint8_t mapped
:1; ///< whether the slot was added to a mapped entry
89 uint8_t finalized
:1; ///< whether finalizeOrThrow() has scanned the slot
90 uint8_t freed
:1; ///< whether the slot was given to the map as free space
93 /// smart StoreEntry-level info pointer (hides anti-padding LoadingParts arrays)
97 LoadingEntry(const sfileno fileNo
, LoadingParts
&source
);
99 uint64_t &size
; ///< payload seen so far
100 uint32_t &version
; ///< DbCellHeader::version to distinguish same-URL chains
102 /// possible store entry states during index rebuild
103 typedef enum { leEmpty
= 0, leLoading
, leLoaded
, leCorrupted
, leIgnored
} State
;
105 /* LoadingFlags::state */
/// \returns this entry's current rebuild state, decoded from the packed
/// LoadingFlags bitfield (one of the LoadingEntry::State values)
State state() const { return static_cast<State>(flags.state); }
/// updates this entry's rebuild state; const is legal here because `flags`
/// is a reference into the shared LoadingParts storage, not our own member
void state(State aState) const { flags.state = aState; }
109 /* LoadingFlags::anchored */
/// whether the inode (first) slot for this entry has been loaded
bool anchored() const { return flags.anchored; }
/// records whether the entry's inode slot has been seen
void anchored(const bool beAnchored) { flags.anchored = beAnchored; }
114 LoadingFlags
&flags
; ///< entry flags (see the above accessors) are ours
117 /// smart db slot-level info pointer (hides anti-padding LoadingParts arrays)
121 LoadingSlot(const SlotId slotId
, LoadingParts
&source
);
123 /// another slot in some chain belonging to the same entry (unordered!)
124 Ipc::StoreMapSliceId
&more
;
126 /* LoadingFlags::mapped */
/// whether this slot was added to a mapped entry
bool mapped() const { return flags.mapped; }
/// records whether this slot was added to a mapped entry
void mapped(const bool beMapped) { flags.mapped = beMapped; }
130 /* LoadingFlags::finalized */
/// whether finalizeOrThrow() has already scanned this slot
bool finalized() const { return flags.finalized; }
/// records whether finalizeOrThrow() has scanned this slot
void finalized(const bool beFinalized) { flags.finalized = beFinalized; }
134 /* LoadingFlags::freed */
/// whether this slot was given back to the map as free space
bool freed() const { return flags.freed; }
/// records whether this slot was given to the map as free space
void freed(const bool beFreed) { flags.freed = beFreed; }
/// whether this slot is already accounted for: freed, mapped into an entry,
/// or linked into some chain (a non-negative `more` means a chain link exists)
bool used() const { return freed() || mapped() || more != -1; }
141 LoadingFlags
&flags
; ///< slot flags (see the above accessors) are ours
144 /// information about store entries being loaded from disk (and their slots)
145 /// used for identifying partially stored/loaded entries
149 LoadingParts(int dbSlotLimit
, int dbEntryLimit
);
150 LoadingParts(LoadingParts
&&) = delete; // paranoid (often too huge to copy)
153 friend class LoadingEntry
;
154 friend class LoadingSlot
;
156 /* Anti-padding storage. With millions of entries, padding matters! */
158 /* indexed by sfileno */
159 std::vector
<uint64_t> sizes
; ///< LoadingEntry::size for all entries
160 std::vector
<uint32_t> versions
; ///< LoadingEntry::version for all entries
162 /* indexed by SlotId */
163 std::vector
<Ipc::StoreMapSliceId
> mores
; ///< LoadingSlot::more for all slots
165 /* entry flags are indexed by sfileno; slot flags -- by SlotId */
166 std::vector
<LoadingFlags
> flags
; ///< all LoadingEntry and LoadingSlot flags
169 } /* namespace Rock */
173 Rock::LoadingEntry::LoadingEntry(const sfileno fileNo
, LoadingParts
&source
):
174 size(source
.sizes
.at(fileNo
)),
175 version(source
.versions
.at(fileNo
)),
176 flags(source
.flags
.at(fileNo
))
182 Rock::LoadingSlot::LoadingSlot(const SlotId slotId
, LoadingParts
&source
):
183 more(source
.mores
.at(slotId
)),
184 flags(source
.flags
.at(slotId
))
190 Rock::LoadingParts::LoadingParts(const int dbEntryLimit
, const int dbSlotLimit
):
191 sizes(dbEntryLimit
, 0),
192 versions(dbEntryLimit
, 0),
193 mores(dbSlotLimit
, -1),
196 assert(sizes
.size() == versions
.size()); // every entry has both fields
197 assert(sizes
.size() <= mores
.size()); // every entry needs slot(s)
198 assert(mores
.size() == flags
.size()); // every slot needs a set of flags
203 Rock::Rebuild::Rebuild(SwapDir
*dir
): AsyncJob("Rock::Rebuild"),
216 memset(&counts
, 0, sizeof(counts
));
217 dbSize
= sd
->diskOffsetLimit(); // we do not care about the trailer waste
218 dbSlotSize
= sd
->slotSize
;
219 dbEntryLimit
= sd
->entryLimitActual();
220 dbSlotLimit
= sd
->slotLimitActual();
221 assert(dbEntryLimit
<= dbSlotLimit
);
224 Rock::Rebuild::~Rebuild()
231 /// prepares and initiates entry loading sequence
233 Rock::Rebuild::start()
235 // in SMP mode, only the disker is responsible for populating the map
236 if (UsingSmp() && !IamDiskProcess()) {
237 debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
238 sd
->index
<< " from " << sd
->filePath
);
239 mustStop("non-disker");
243 debugs(47, DBG_IMPORTANT
, "Loading cache_dir #" << sd
->index
<<
244 " from " << sd
->filePath
);
246 fd
= file_open(sd
->filePath
, O_RDONLY
| O_BINARY
);
248 failure("cannot open db", errno
);
250 char hdrBuf
[SwapDir::HeaderSize
];
251 if (read(fd
, hdrBuf
, sizeof(hdrBuf
)) != SwapDir::HeaderSize
)
252 failure("cannot read db header", errno
);
254 // slot prefix of SM_PAGE_SIZE should fit both core entry header and ours
255 assert(sizeof(DbCellHeader
) < SM_PAGE_SIZE
);
256 buf
.init(SM_PAGE_SIZE
, SM_PAGE_SIZE
);
258 dbOffset
= SwapDir::HeaderSize
;
260 parts
= new LoadingParts(dbEntryLimit
, dbSlotLimit
);
265 /// continues after a pause if not done
267 Rock::Rebuild::checkpoint()
270 eventAdd("Rock::Rebuild", Rock::Rebuild::Steps
, this, 0.01, 1, true);
274 Rock::Rebuild::doneLoading() const
276 return loadingPos
>= dbSlotLimit
;
280 Rock::Rebuild::doneValidating() const
282 // paranoid slot checking is only enabled with squid -S
283 return validationPos
>= dbEntryLimit
+
284 (opt_store_doublecheck
? dbSlotLimit
: 0);
288 Rock::Rebuild::doneAll() const
290 return doneLoading() && doneValidating() && AsyncJob::doneAll();
294 Rock::Rebuild::Steps(void *data
)
296 // use async call to enable job call protection that time events lack
297 CallJobHere(47, 5, static_cast<Rebuild
*>(data
), Rock::Rebuild
, steps
);
301 Rock::Rebuild::steps()
312 Rock::Rebuild::loadingSteps()
314 debugs(47,5, sd
->index
<< " slot " << loadingPos
<< " at " <<
315 dbOffset
<< " <= " << dbSize
);
317 // Balance our desire to maximize the number of entries processed at once
318 // (and, hence, minimize overheads and total rebuild time) with a
319 // requirement to also process Coordinator events, disk I/Os, etc.
320 const int maxSpentMsec
= 50; // keep small: most RAM I/Os are under 1ms
321 const timeval loopStart
= current_time
;
324 while (!doneLoading()) {
326 dbOffset
+= dbSlotSize
;
330 if (counts
.scancount
% 1000 == 0)
331 storeRebuildProgress(sd
->index
, dbSlotLimit
, counts
.scancount
);
333 if (opt_foreground_rebuild
)
334 continue; // skip "few entries at a time" check below
337 const double elapsedMsec
= tvSubMsec(loopStart
, current_time
);
338 if (elapsedMsec
> maxSpentMsec
|| elapsedMsec
< 0) {
339 debugs(47, 5, HERE
<< "pausing after " << loaded
<< " entries in " <<
340 elapsedMsec
<< "ms; " << (elapsedMsec
/loaded
) << "ms per entry");
347 Rock::Rebuild::loadingEntry(const sfileno fileNo
)
349 Must(0 <= fileNo
&& fileNo
< dbEntryLimit
);
350 return LoadingEntry(fileNo
, *parts
);
354 Rock::Rebuild::loadingSlot(const SlotId slotId
)
356 Must(0 <= slotId
&& slotId
< dbSlotLimit
);
357 Must(slotId
<= loadingPos
); // cannot look ahead
358 return LoadingSlot(slotId
, *parts
);
362 Rock::Rebuild::loadOneSlot()
364 debugs(47,5, sd
->index
<< " slot " << loadingPos
<< " at " <<
365 dbOffset
<< " <= " << dbSize
);
369 if (lseek(fd
, dbOffset
, SEEK_SET
) < 0)
370 failure("cannot seek to db entry", errno
);
374 if (!storeRebuildLoadEntry(fd
, sd
->index
, buf
, counts
))
377 const SlotId slotId
= loadingPos
;
381 if (buf
.contentSize() < static_cast<mb_size_t
>(sizeof(header
))) {
382 debugs(47, DBG_IMPORTANT
, "WARNING: cache_dir[" << sd
->index
<< "]: " <<
383 "Ignoring truncated " << buf
.contentSize() << "-byte " <<
384 "cache entry meta data at " << dbOffset
);
385 freeUnusedSlot(slotId
, true);
388 memcpy(&header
, buf
.content(), sizeof(header
));
389 if (header
.empty()) {
390 freeUnusedSlot(slotId
, false);
393 if (!header
.sane(dbSlotSize
, dbSlotLimit
)) {
394 debugs(47, DBG_IMPORTANT
, "WARNING: cache_dir[" << sd
->index
<< "]: " <<
395 "Ignoring malformed cache entry meta data at " << dbOffset
);
396 freeUnusedSlot(slotId
, true);
399 buf
.consume(sizeof(header
)); // optimize to avoid memmove()
401 useNewSlot(slotId
, header
);
404 /// parse StoreEntry basics and add them to the map, returning true on success
406 Rock::Rebuild::importEntry(Ipc::StoreMapAnchor
&anchor
, const sfileno fileno
, const DbCellHeader
&header
)
408 cache_key key
[SQUID_MD5_DIGEST_LENGTH
];
410 const uint64_t knownSize
= header
.entrySize
> 0 ?
411 header
.entrySize
: anchor
.basics
.swap_file_sz
.load();
412 if (!storeRebuildParseEntry(buf
, loadedE
, key
, counts
, knownSize
))
415 // the entry size may be unknown, but if it is known, it is authoritative
417 debugs(47, 8, "importing basics for entry " << fileno
<<
418 " inode.entrySize: " << header
.entrySize
<<
419 " swap_file_sz: " << loadedE
.swap_file_sz
);
422 // we have not validated whether all db cells for this entry were loaded
423 EBIT_CLR(anchor
.basics
.flags
, ENTRY_VALIDATED
);
431 Rock::Rebuild::validationSteps()
433 debugs(47, 5, sd
->index
<< " validating from " << validationPos
);
435 // see loadingSteps() for the rationale; TODO: avoid duplication
436 const int maxSpentMsec
= 50; // keep small: validation does not do I/O
437 const timeval loopStart
= current_time
;
440 while (!doneValidating()) {
441 if (validationPos
< dbEntryLimit
)
442 validateOneEntry(validationPos
);
444 validateOneSlot(validationPos
- dbEntryLimit
);
448 if (validationPos
% 1000 == 0)
449 debugs(20, 2, "validated: " << validationPos
);
451 if (opt_foreground_rebuild
)
452 continue; // skip "few entries at a time" check below
455 const double elapsedMsec
= tvSubMsec(loopStart
, current_time
);
456 if (elapsedMsec
> maxSpentMsec
|| elapsedMsec
< 0) {
457 debugs(47, 5, "pausing after " << validated
<< " entries in " <<
458 elapsedMsec
<< "ms; " << (elapsedMsec
/validated
) << "ms per entry");
464 /// Either make the entry accessible to all or throw.
465 /// This method assumes it is called only when no more entry slots are expected.
467 Rock::Rebuild::finalizeOrThrow(const sfileno fileNo
, LoadingEntry
&le
)
469 // walk all map-linked slots, starting from inode, and mark each
470 Ipc::StoreMapAnchor
&anchor
= sd
->map
->writeableEntry(fileNo
);
471 Must(le
.size
> 0); // paranoid
472 uint64_t mappedSize
= 0;
473 SlotId slotId
= anchor
.start
;
474 while (slotId
>= 0 && mappedSize
< le
.size
) {
475 LoadingSlot slot
= loadingSlot(slotId
); // throws if we have not loaded that slot
476 Must(!slot
.finalized()); // no loops or stealing from other entries
477 Must(slot
.mapped()); // all our slots should be in the sd->map
478 Must(!slot
.freed()); // all our slots should still be present
479 slot
.finalized(true);
481 Ipc::StoreMapSlice
&mapSlice
= sd
->map
->writeableSlice(fileNo
, slotId
);
482 Must(mapSlice
.size
> 0); // paranoid
483 mappedSize
+= mapSlice
.size
;
484 slotId
= mapSlice
.next
;
486 /* no hodgepodge entries: one entry - one full chain and no leftovers */
488 Must(mappedSize
== le
.size
);
490 if (!anchor
.basics
.swap_file_sz
)
491 anchor
.basics
.swap_file_sz
= le
.size
;
492 EBIT_SET(anchor
.basics
.flags
, ENTRY_VALIDATED
);
493 le
.state(LoadingEntry::leLoaded
);
494 sd
->map
->closeForWriting(fileNo
, false);
498 /// Either make the entry accessible to all or free it.
499 /// This method must only be called when no more entry slots are expected.
501 Rock::Rebuild::finalizeOrFree(const sfileno fileNo
, LoadingEntry
&le
)
504 finalizeOrThrow(fileNo
, le
);
505 } catch (const std::exception
&ex
) {
506 freeBadEntry(fileNo
, ex
.what());
511 Rock::Rebuild::validateOneEntry(const sfileno fileNo
)
513 LoadingEntry entry
= loadingEntry(fileNo
);
514 switch (entry
.state()) {
516 case LoadingEntry::leLoading
:
517 finalizeOrFree(fileNo
, entry
);
520 case LoadingEntry::leEmpty
: // no entry hashed to this position
521 case LoadingEntry::leLoaded
: // we have already unlocked this entry
522 case LoadingEntry::leCorrupted
: // we have already removed this entry
523 case LoadingEntry::leIgnored
: // we have already discarded this entry
529 Rock::Rebuild::validateOneSlot(const SlotId slotId
)
531 const LoadingSlot slot
= loadingSlot(slotId
);
532 // there should not be any unprocessed slots left
533 Must(slot
.freed() || (slot
.mapped() && slot
.finalized()));
536 /// Marks remaining bad entry slots as free and unlocks the entry. The map
537 /// cannot do this because Loading entries may have holes in the slots chain.
539 Rock::Rebuild::freeBadEntry(const sfileno fileno
, const char *eDescription
)
541 debugs(47, 2, "cache_dir #" << sd
->index
<< ' ' << eDescription
<<
542 " entry " << fileno
<< " is ignored during rebuild");
544 LoadingEntry le
= loadingEntry(fileno
);
545 le
.state(LoadingEntry::leCorrupted
);
547 Ipc::StoreMapAnchor
&anchor
= sd
->map
->writeableEntry(fileno
);
548 assert(anchor
.start
< 0 || le
.size
> 0);
549 for (SlotId slotId
= anchor
.start
; slotId
>= 0;) {
550 const SlotId next
= loadingSlot(slotId
).more
;
551 freeSlot(slotId
, true);
555 sd
->map
->forgetWritingEntry(fileno
);
559 Rock::Rebuild::swanSong()
561 debugs(47,3, HERE
<< "cache_dir #" << sd
->index
<< " rebuild level: " <<
562 StoreController::store_dirs_rebuilding
);
563 --StoreController::store_dirs_rebuilding
;
564 storeRebuildComplete(&counts
);
568 Rock::Rebuild::failure(const char *msg
, int errNo
)
570 debugs(47,5, sd
->index
<< " slot " << loadingPos
<< " at " <<
571 dbOffset
<< " <= " << dbSize
);
574 debugs(47, DBG_CRITICAL
, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo
));
575 debugs(47, DBG_CRITICAL
, "Do you need to run 'squid -z' to initialize storage?");
578 fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
579 sd
->index
, sd
->filePath
, msg
);
582 /// adds slot to the free slot index
584 Rock::Rebuild::freeSlot(const SlotId slotId
, const bool invalid
)
586 debugs(47,5, sd
->index
<< " frees slot " << slotId
);
587 LoadingSlot slot
= loadingSlot(slotId
);
588 assert(!slot
.freed());
593 //sd->unlink(fileno); leave garbage on disk, it should not hurt
596 Ipc::Mem::PageId pageId
;
597 pageId
.pool
= sd
->index
+1;
598 pageId
.number
= slotId
+1;
599 sd
->freeSlots
->push(pageId
);
602 /// freeSlot() for never-been-mapped slots
604 Rock::Rebuild::freeUnusedSlot(const SlotId slotId
, const bool invalid
)
606 LoadingSlot slot
= loadingSlot(slotId
);
607 // mapped slots must be freed via freeBadEntry() to keep the map in sync
608 assert(!slot
.mapped());
609 freeSlot(slotId
, invalid
);
612 /// adds slot to the entry chain in the map
614 Rock::Rebuild::mapSlot(const SlotId slotId
, const DbCellHeader
&header
)
616 LoadingSlot slot
= loadingSlot(slotId
);
617 assert(!slot
.mapped());
618 assert(!slot
.freed());
621 Ipc::StoreMapSlice slice
;
622 slice
.next
= header
.nextSlot
;
623 slice
.size
= header
.payloadSize
;
624 sd
->map
->importSlice(slotId
, slice
);
627 template <class SlotIdType
> // accommodates atomic and simple SlotIds.
629 Rock::Rebuild::chainSlots(SlotIdType
&from
, const SlotId to
)
631 LoadingSlot slot
= loadingSlot(to
);
632 assert(slot
.more
< 0);
633 slot
.more
= from
; // may still be unset
637 /// adds slot to an existing entry chain; caller must check that the slot
638 /// belongs to the chain it is being added to
640 Rock::Rebuild::addSlotToEntry(const sfileno fileno
, const SlotId slotId
, const DbCellHeader
&header
)
642 LoadingEntry le
= loadingEntry(fileno
);
643 Ipc::StoreMapAnchor
&anchor
= sd
->map
->writeableEntry(fileno
);
645 debugs(47,9, "adding " << slotId
<< " to entry " << fileno
);
646 // we do not need to preserve the order
648 LoadingSlot inode
= loadingSlot(anchor
.start
);
649 chainSlots(inode
.more
, slotId
);
651 chainSlots(anchor
.start
, slotId
);
654 le
.size
+= header
.payloadSize
; // must precede freeBadEntry() calls
656 if (header
.firstSlot
== slotId
) {
657 debugs(47,5, "added inode");
659 if (le
.anchored()) { // we have already added another inode slot
660 freeBadEntry(fileno
, "inode conflict");
667 if (!importEntry(anchor
, fileno
, header
)) {
668 freeBadEntry(fileno
, "corrupted metainfo");
672 // set total entry size and/or check it for consistency
673 if (const uint64_t totalSize
= header
.entrySize
) {
674 assert(totalSize
!= static_cast<uint64_t>(-1));
675 if (!anchor
.basics
.swap_file_sz
) {
676 anchor
.basics
.swap_file_sz
= totalSize
;
677 assert(anchor
.basics
.swap_file_sz
!= static_cast<uint64_t>(-1));
678 } else if (totalSize
!= anchor
.basics
.swap_file_sz
) {
679 freeBadEntry(fileno
, "size mismatch");
685 const uint64_t totalSize
= anchor
.basics
.swap_file_sz
; // may be 0/unknown
687 if (totalSize
> 0 && le
.size
> totalSize
) { // overflow
688 debugs(47, 8, "overflow: " << le
.size
<< " > " << totalSize
);
689 freeBadEntry(fileno
, "overflowing");
693 mapSlot(slotId
, header
);
694 if (totalSize
> 0 && le
.size
== totalSize
)
695 finalizeOrFree(fileno
, le
); // entry is probably fully loaded now
698 /// initialize housekeeping information for a newly accepted entry
700 Rock::Rebuild::primeNewEntry(Ipc::StoreMap::Anchor
&anchor
, const sfileno fileno
, const DbCellHeader
&header
)
702 anchor
.setKey(reinterpret_cast<const cache_key
*>(header
.key
));
703 assert(header
.firstSlot
>= 0);
704 anchor
.start
= -1; // addSlotToEntry() will set it
706 assert(anchor
.basics
.swap_file_sz
!= static_cast<uint64_t>(-1));
708 LoadingEntry le
= loadingEntry(fileno
);
709 le
.state(LoadingEntry::leLoading
);
710 le
.version
= header
.version
;
714 /// handle a slot from an entry that we have not seen before
716 Rock::Rebuild::startNewEntry(const sfileno fileno
, const SlotId slotId
, const DbCellHeader
&header
)
718 // A miss may have been stored at our fileno while we were loading other
719 // slots from disk. We ought to preserve that entry because it is fresher.
720 const bool overwriteExisting
= false;
721 if (Ipc::StoreMap::Anchor
*anchor
= sd
->map
->openForWritingAt(fileno
, overwriteExisting
)) {
722 primeNewEntry(*anchor
, fileno
, header
);
723 addSlotToEntry(fileno
, slotId
, header
); // may fail
724 assert(anchor
->basics
.swap_file_sz
!= static_cast<uint64_t>(-1));
726 // A new from-network entry is occupying our map slot; let it be, but
727 // save us from the trouble of going through the above motions again.
728 LoadingEntry le
= loadingEntry(fileno
);
729 le
.state(LoadingEntry::leIgnored
);
730 freeUnusedSlot(slotId
, false);
734 /// does the header belong to the fileno entry being loaded?
736 Rock::Rebuild::sameEntry(const sfileno fileno
, const DbCellHeader
&header
) const
738 // Header updates always result in multi-start chains and often
739 // result in multi-version chains so we can only compare the keys.
740 const Ipc::StoreMap::Anchor
&anchor
= sd
->map
->writeableEntry(fileno
);
741 return anchor
.sameKey(reinterpret_cast<const cache_key
*>(header
.key
));
744 /// handle freshly loaded (and validated) db slot header
746 Rock::Rebuild::useNewSlot(const SlotId slotId
, const DbCellHeader
&header
)
748 const cache_key
*const key
=
749 reinterpret_cast<const cache_key
*>(header
.key
);
750 const sfileno fileno
= sd
->map
->fileNoByKey(key
);
751 assert(0 <= fileno
&& fileno
< dbEntryLimit
);
753 LoadingEntry le
= loadingEntry(fileno
);
754 debugs(47,9, "entry " << fileno
<< " state: " << le
.state() << ", inode: " <<
755 header
.firstSlot
<< ", size: " << header
.payloadSize
);
757 switch (le
.state()) {
759 case LoadingEntry::leEmpty
: {
760 startNewEntry(fileno
, slotId
, header
);
764 case LoadingEntry::leLoading
: {
765 if (sameEntry(fileno
, header
)) {
766 addSlotToEntry(fileno
, slotId
, header
); // may fail
768 // either the loading chain or this slot is stale;
769 // be conservative and ignore both (and any future ones)
770 freeBadEntry(fileno
, "duplicated");
771 freeUnusedSlot(slotId
, true);
777 case LoadingEntry::leLoaded
: {
778 // either the previously loaded chain or this slot is stale;
779 // be conservative and ignore both (and any future ones)
780 le
.state(LoadingEntry::leCorrupted
);
781 sd
->map
->freeEntry(fileno
); // may not be immediately successful
782 freeUnusedSlot(slotId
, true);
787 case LoadingEntry::leCorrupted
: {
788 // previously seen slots messed things up so we must ignore this one
789 freeUnusedSlot(slotId
, true);
793 case LoadingEntry::leIgnored
: {
794 // already replaced by a fresher or colliding from-network entry
795 freeUnusedSlot(slotId
, false);