/* migrated from store_dir.cc */
bool objectSizeIsAcceptable(int64_t objsize) const;
+ /// called when the entry is about to forget its association with cache_dir
+ virtual void disconnect(StoreEntry &) {}
+
protected:
void parseOptions(int reconfiguring);
void dumpOptions(StoreEntry * e) const;
s.switchExclusiveToSharedLock();
}
+/// terminate writing the entry, freeing its slot for others to use
void
Rock::DirMap::abortWriting(const sfileno fileno)
{
freeLocked(s);
}
+void
+Rock::DirMap::abortIo(const sfileno fileno)
+{
+ debugs(79, 5, HERE << "abort I/O for slot at " << fileno <<
+ " in map [" << path << ']');
+ assert(valid(fileno));
+ Slot &s = shared->slots[fileno];
+
+ // The caller is a lock holder. Thus, if the slot is Writeable, then the
+ // caller must be the writer; otherwise, the caller must be the reader.
+ if (s.state == Slot::Writeable)
+ abortWriting(fileno);
+ else
+ closeForReading(fileno);
+}
+
bool
Rock::DirMap::putAt(const StoreEntry &e, const sfileno fileno)
{
"map [" << path << ']');
assert(valid(fileno));
Slot &s = shared->slots[fileno];
+ assert(s.state == Slot::Readable);
s.releaseSharedLock();
freeIfNeeded(s);
}
StoreEntryBasics *openForWriting(const cache_key *const key, sfileno &fileno);
/// successfully finish writing the entry, leaving it opened for reading
void closeForWriting(const sfileno fileno);
- /// terminate writing the entry, freeing its slot for others to use
- void abortWriting(const sfileno fileno);
/// stores entry info at the requested slot or returns false
bool putAt(const StoreEntry &e, const sfileno fileno);
/// close slot after reading, decrements read level
void closeForReading(const sfileno fileno);
+ /// called by lock holder to terminate either slot writing or reading
+ void abortIo(const sfileno fileno);
+
bool full() const; ///< there are no empty slots left
bool valid(int n) const; ///< whether n is a valid slot coordinate
int entryCount() const; ///< number of used slots
int slotIdx(const cache_key *const key) const;
Slot &slot(const cache_key *const key);
const StoreEntryBasics *openForReading(Slot &s);
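+ /// terminate writing the entry, freeing its slot for others to use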
+ void abortWriting(const sfileno fileno);
void freeIfNeeded(Slot &s);
void freeLocked(Slot &s);
String sharedMemoryName();
// the disk entry remains open for reading, protected from modifications
}
-void
-Rock::SwapDir::closeForReading(StoreEntry &e)
+void
+Rock::SwapDir::disconnect(StoreEntry &e)
{
- assert(index == e.swap_dirn);
+ assert(e.swap_dirn == index);
assert(e.swap_filen >= 0);
- map->closeForReading(e.swap_filen);
+ // cannot have SWAPOUT_NONE entry with swap_filen >= 0
+ assert(e.swap_status != SWAPOUT_NONE);
+
+ // do not rely on e.swap_status here because there is an async delay
+ // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
+
+ // since e has a valid swap_filen, its slot is locked for either reading or writing
+ map->abortIo(e.swap_filen);
e.swap_dirn = -1;
e.swap_filen = -1;
+ e.swap_status = SWAPOUT_NONE;
}
// TODO: encapsulate as a tool; identical to CossSwapDir::create()
assert(request->sio != NULL);
IoState &sio = *request->sio;
- if (errflag != DISK_OK)
- map->free(sio.swap_filen); // TODO: test by forcing failure
- // else sio.offset_ += rlen;
-
- map->closeForWriting(sio.swap_filen); // assume we only write once
+ if (errflag == DISK_OK) {
+ // close, assuming we only write once; the entry gets the read lock
+ map->closeForWriting(sio.swap_filen);
+ // and sio.offset_ += rlen;
+ } else {
+ // Do not abortWriting() here. The entry should keep its write lock
+ // instead of losing its association with the store and confusing the core.
+ map->free(sio.swap_filen); // will mark the slot as unusable, just in case
+ }
// TODO: always compute cur_size based on map, do not store it
cur_size = (HeaderSize + max_objsize * map->entryCount()) >> 10;
void
Rock::SwapDir::unlink(StoreEntry &e)
{
- debugs(47, 5, HERE << &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
+ debugs(47, 5, HERE << e);
ignoreReferences(e);
map->free(e.swap_filen);
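+ // disconnect() also resets e.swap_dirn, e.swap_filen, and e.swap_status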
+ disconnect(e);
}
void
Rock::SwapDir::trackReferences(StoreEntry &e)
{
- debugs(47, 5, HERE << *e);
+ debugs(47, 5, HERE << e);
repl->Add(repl, &e, &e.repl);
}
void
Rock::SwapDir::ignoreReferences(StoreEntry &e)
{
- debugs(47, 5, HERE << *e);
+ debugs(47, 5, HERE << e);
repl->Remove(repl, &e, &e.repl);
}
virtual void reconfigure(int, char *);
virtual StoreSearch *search(String const url, HttpRequest *);
virtual StoreEntry *get(const cache_key *key);
-
- void closeForReading(StoreEntry &e);
+ virtual void disconnect(StoreEntry &e);
protected:
/* protected ::SwapDir API */
#include "SquidTime.h"
#include "swap_log_op.h"
#include "mgr/StoreIoAction.h"
-#include "fs/rock/RockSwapDir.h"
static STMCB storeWriteComplete;
expires = lastmod = lastref = timestamp = -1;
+ swap_status = SWAPOUT_NONE;
swap_filen = -1;
swap_dirn = -1;
}
expires = lastmod = lastref = timestamp = -1;
+ swap_status = SWAPOUT_NONE;
swap_filen = -1;
swap_dirn = -1;
}
StoreEntry::~StoreEntry()
{
if (swap_filen >= 0) {
- // XXX: support cache types other than Rock
- Rock::SwapDir &rockSwapDir =
- dynamic_cast<Rock::SwapDir &>(*store());
- rockSwapDir.closeForReading(*this);
+ SwapDir &sd = dynamic_cast<SwapDir&>(*store());
+ sd.disconnect(*this);
}
}
e->store_status = STORE_PENDING;
e->setMemStatus(NOT_IN_MEMORY);
- e->swap_status = SWAPOUT_NONE;
- e->swap_filen = -1;
- e->swap_dirn = -1;
e->refcount = 0;
e->lastref = squid_curtime;
e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
storeLog(STORE_LOG_RELEASE, this);
if (swap_filen > -1) {
- unlink();
+ // update size before unlink() below clears swap_status
+ // TODO: the store/SwapDir::unlink should update the size!
if (swap_status == SWAPOUT_DONE)
if (EBIT_TEST(flags, ENTRY_VALIDATED))
store()->updateSize(swap_file_sz, -1);
+ // log before unlink() below clears swap_filen
if (!EBIT_TEST(flags, KEY_PRIVATE))
storeDirSwapLog(this, SWAP_LOG_DEL);
-#if 0
- /* From 2.4. I think we do this in storeUnlink? */
- storeSwapFileNumberSet(this, -1);
-
-#endif
-
+ unlink();
}
setMemStatus(NOT_IN_MEMORY);
void
StoreEntry::unlink()
{
- store()->unlink(*this);
+ store()->unlink(*this); // implies disconnect()
+ swap_filen = -1;
+ swap_dirn = -1;
+ swap_status = SWAPOUT_NONE;
}
/*
e->swap_dirn << " to " << sc->swapin_sio->swap_filen << "/" <<
sc->swapin_sio->swap_dirn);
+ assert(e->swap_filen < 0); // if this fails, call SwapDir::disconnect(e)
e->swap_filen = sc->swapin_sio->swap_filen;
e->swap_dirn = sc->swapin_sio->swap_dirn;
}
assert(mem);
assert(mem->swapout.sio == self);
assert(errflag == 0);
+ assert(e->swap_filen < 0); // if this fails, call SwapDir::disconnect(e)
e->swap_filen = mem->swapout.sio->swap_filen;
e->swap_dirn = mem->swapout.sio->swap_dirn;
}
if (e->swap_filen > 0)
e->unlink();
- e->swap_filen = -1;
-
- e->swap_dirn = -1;
-
- e->swap_status = SWAPOUT_NONE;
+ assert(e->swap_status == SWAPOUT_NONE);
e->releaseRequest();
} else {