public:
/* RegisteredRunner API */
virtual void run(const RunnerRegistry &);
- // TODO: cleanup in destructor
+ virtual ~MemStoreRr();
};
RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);
if (IamMasterProcess())
MemStore::Init();
}
+
+/// Cleanup counterpart of run(): removes the shared memory cache map
+/// segment. Only the master process unlinks so the segment is removed
+/// exactly once.
+MemStoreRr::~MemStoreRr()
+{
+    // XXX: restore if (!UsingSmp()) return;
+
+    // unlink once, from the master process only
+    if (IamMasterProcess())
+        MemStoreMap::Unlink(ShmLabel);
+}
debugs(54, 5, HERE << "attached map [" << path << "] created: " << shared->limit);
}
+/// Removes the shared memory segment backing the map at the given path.
+/// Static: callable without an attached StoreMap instance.
+void
+Ipc::StoreMap::Unlink(const char *const path)
+{
+    // delegate to the segment layer, which reports (but tolerates) failures
+    Mem::Segment::Unlink(path);
+}
+
Ipc::StoreMap::Slot *
Ipc::StoreMap::openForWriting(const cache_key *const key, sfileno &fileno)
{
StoreMap(const char *const aPath, const int limit, size_t sharedSizeExtra); ///< create a new shared StoreMap
explicit StoreMap(const char *const aPath); ///< open an existing shared StoreMap
+ static void Unlink(const char *const path); ///< unlink shared memory segment
/// finds, reservers space for writing a new entry or returns nil
Slot *openForWriting(const cache_key *const key, sfileno &fileno);
assert(shared);
}
+/// Removes both shared memory segments backing a page pool:
+/// the page index stack and the pool storage segment itself.
+void
+Ipc::Mem::PagePool::Unlink(const String &id)
+{
+    // the stack lives in its own segment, named via PageIndexId(id)
+    PageStack::Unlink(PageIndexId(id));
+    Segment::Unlink(id.termedBuf());
+}
+
bool
Ipc::Mem::PagePool::get(PageId &page)
{
PagePool(const String &id, const unsigned int capacity, const size_t pageSize);
/// attaches to the identified shared page pool
PagePool(const String &id);
+ /// unlinks shared memory segments
+ static void Unlink(const String &id);
unsigned int capacity() const { return shared->theCapacity; }
/// lower bound for the number of free pages
assert(shared);
}
+/// Removes the shared memory segment backing the identified page stack.
+void
+Ipc::Mem::PageStack::Unlink(const String &id)
+{
+    Segment::Unlink(id.termedBuf());
+}
+
/*
* TODO: We currently rely on the theLastReadable hint during each
* loop iteration. We could also use hint just for the start position:
PageStack(const String &id, const unsigned int capacity);
/// attaches to the identified shared stack
PageStack(const String &id);
+ /// unlinks shared memory segment
+ static void Unlink(const String &id);
/// lower bound for the number of free pages
unsigned int size() const { return max(0, shared->theSize.get()); }
public:
/* RegisteredRunner API */
virtual void run(const RunnerRegistry &);
- // TODO: cleanup in destructor
+ virtual ~SharedMemPagesRr();
};
RunnerRegistrationEntry(rrAfterConfig, SharedMemPagesRr);
else
Ipc::Mem::Attach();
}
+
+/// Cleanup counterpart of run(): detaches this process from the shared
+/// page pool and, in the master process only, removes its segments.
+SharedMemPagesRr::~SharedMemPagesRr()
+{
+    // detach from the pool before (possibly) unlinking its segments
+    delete ThePagePool;
+    ThePagePool = NULL; // avoid a dangling pointer after cleanup
+    // only the master process removes the shared segments
+    if (IamMasterProcess())
+        Ipc::Mem::PagePool::Unlink(PagePoolId);
+}
/// attaches this kid to the already configured shared memory [pools]
void Attach();
-
/* Single page manipulation */
/// sets page ID and returns true unless no free pages are found
return result;
}
+/// Removes the POSIX shared memory segment with the given id.
+/// Best-effort: an shm_unlink() failure is only reported at debug level 5.
+void
+Ipc::Mem::Segment::Unlink(const char *const id)
+{
+    // translate the caller-visible id into the OS-level segment name
+    const String path = GenerateName(id);
+    if (shm_unlink(path.termedBuf()) != 0)
+        debugs(54, 5, HERE << "shm_unlink(" << path << "): " << xstrerror());
+}
+
/// determines the size of the underlying "file"
off_t
Ipc::Mem::Segment::statSize(const char *context) const
void *reserve(size_t chunkSize); ///< reserve and return the next chunk
// TODO: convert most mem() calls to reserve()
+ static void Unlink(const char *const id); ///< unlink the segment
+
private:
void attach();
void detach();
debugs(1,2, HERE << "Doing post-config initialization\n");
ActivateRegistered(rrAfterConfig);
- // TODO: find a place to call DeactivateRegistered(rrAfterConfig);
if (!opt_no_daemon && Config.workers > 0)
watch_child(argv);
#endif
if (!TheKids.someRunning() && !TheKids.shouldRestartSome()) {
+ DeactivateRegistered(rrAfterConfig);
+
if (TheKids.someSignaled(SIGINT) || TheKids.someSignaled(SIGTERM)) {
syslog(LOG_ALERT, "Exiting due to unexpected forced shutdown");
exit(1);
Store::Root().sync(); /* Flush log close */
StoreFileSystem::FreeAllFs();
DiskIOModule::FreeAllModules();
+ DeactivateRegistered(rrAfterConfig);
#if LEAK_CHECK_MODE && 0 /* doesn't work at the moment */
configFreeMemory();