]> git.ipfire.org Git - thirdparty/squid.git/blame - src/Transients.cc
Support selective CF: collapsed_forwarding_access (#151)
[thirdparty/squid.git] / src / Transients.cc
CommitLineData
/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */
8
/* DEBUG: section 20    Storage Manager */
10
9a9954ba
AR
11#include "squid.h"
12#include "base/RunnersRegistry.h"
e4d13993 13#include "CollapsedForwarding.h"
9a9954ba
AR
14#include "HttpReply.h"
15#include "ipc/mem/Page.h"
16#include "ipc/mem/Pages.h"
17#include "MemObject.h"
9a9954ba
AR
18#include "mime_header.h"
19#include "SquidConfig.h"
20#include "SquidMath.h"
21#include "StoreStats.h"
22#include "tools.h"
e4d13993 23#include "Transients.h"
9a9954ba 24
9a9954ba 25#include <limits>
9a9954ba 26
1860fbac
AR
27/// shared memory segment path to use for Transients map
28static const SBuf MapLabel("transients_map");
9a9954ba 29
6919be24 30Transients::Transients(): map(NULL), locals(NULL)
9a9954ba 31{
9a9954ba
AR
32}
33
34Transients::~Transients()
35{
36 delete map;
6919be24 37 delete locals;
9a9954ba
AR
38}
39
40void
41Transients::init()
42{
43 const int64_t entryLimit = EntryLimit();
44 if (entryLimit <= 0)
45 return; // no SMP support or a misconfiguration
46
47 Must(!map);
48 map = new TransientsMap(MapLabel);
49 map->cleaner = this;
6919be24 50
8bcca0f8 51 locals = new Locals(entryLimit, 0);
9a9954ba
AR
52}
53
54void
55Transients::getStats(StoreInfoStats &stats) const
56{
57#if TRANSIENT_STATS_SUPPORTED
58 const size_t pageSize = Ipc::Mem::PageSize();
59
60 stats.mem.shared = true;
61 stats.mem.capacity =
62 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
63 stats.mem.size =
64 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
65 stats.mem.count = currentCount();
66#endif
67}
68
69void
70Transients::stat(StoreEntry &e) const
71{
72 storeAppendPrintf(&e, "\n\nTransient Objects\n");
73
74 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
75 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
76 currentSize() / 1024.0,
77 Math::doublePercent(currentSize(), maxSize()));
78
79 if (map) {
80 const int limit = map->entryLimit();
81 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
82 if (limit > 0) {
83 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
84 currentCount(), (100.0 * currentCount() / limit));
85 }
86 }
87}
88
89void
90Transients::maintain()
91{
e4d13993 92 // no lazy garbage collection needed
9a9954ba
AR
93}
94
95uint64_t
96Transients::minSize() const
97{
98 return 0; // XXX: irrelevant, but Store parent forces us to implement this
99}
100
101uint64_t
102Transients::maxSize() const
103{
104 // Squid currently does not limit the total size of all transient objects
105 return std::numeric_limits<uint64_t>::max();
106}
107
108uint64_t
109Transients::currentSize() const
110{
111 // TODO: we do not get enough information to calculate this
112 // StoreEntry should update associated stores when its size changes
113 return 0;
114}
115
116uint64_t
117Transients::currentCount() const
118{
119 return map ? map->entryCount() : 0;
120}
121
122int64_t
123Transients::maxObjectSize() const
124{
125 // Squid currently does not limit the size of a transient object
126 return std::numeric_limits<uint64_t>::max();
127}
128
129void
130Transients::reference(StoreEntry &)
131{
e4d13993 132 // no replacement policy (but the cache(s) storing the entry may have one)
9a9954ba
AR
133}
134
135bool
2745fea5 136Transients::dereference(StoreEntry &)
9a9954ba
AR
137{
138 // no need to keep e in the global store_table for us; we have our own map
139 return false;
140}
141
9a9954ba
AR
142StoreEntry *
143Transients::get(const cache_key *key)
144{
145 if (!map)
146 return NULL;
147
148 sfileno index;
1bfe9ade
AR
149 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
150 if (!anchor)
9a9954ba
AR
151 return NULL;
152
6919be24
AR
153 // If we already have a local entry, the store_table should have found it.
154 // Since it did not, the local entry key must have changed from public to
155 // private. We still need to keep the private entry around for syncing as
156 // its clients depend on it, but we should not allow new clients to join.
157 if (StoreEntry *oldE = locals->at(index)) {
158 debugs(20, 3, "not joining private " << *oldE);
159 assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
4310f8b0
EB
160 map->closeForReading(index);
161 return nullptr;
1bfe9ade 162 }
4475555f 163
4310f8b0
EB
164 StoreEntry *e = new StoreEntry();
165 e->createMemObject();
4475555f 166 e->mem_obj->xitTable.index = index;
4310f8b0
EB
167 e->mem_obj->xitTable.io = Store::ioReading;
168 anchor->exportInto(*e);
169 // keep read lock to receive updates from others
9a9954ba
AR
170 return e;
171}
172
6919be24
AR
173StoreEntry *
174Transients::findCollapsed(const sfileno index)
175{
176 if (!map)
177 return NULL;
178
179 if (StoreEntry *oldE = locals->at(index)) {
180 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
181 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
182 return oldE;
183 }
184
185 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
186 return NULL;
187}
188
9a9954ba 189void
4310f8b0 190Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
9a9954ba 191{
4310f8b0 192 assert(direction == Store::ioReading || direction == Store::ioWriting);
9a9954ba 193
4310f8b0
EB
194 if (!e->hasTransients()) {
195 addEntry(e, key, direction);
196 e->mem_obj->xitTable.io = direction;
9d4e9cfb 197 }
9a9954ba 198
4310f8b0
EB
199 assert(e->hasTransients());
200 const auto index = e->mem_obj->xitTable.index;
201 if (const auto old = locals->at(index)) {
202 assert(old == e);
203 } else {
204 // We do not lock e because we do not want to prevent its destruction;
205 // e is tied to us via mem_obj so we will know when it is destructed.
206 locals->at(index) = e;
9d4e9cfb 207 }
9a9954ba
AR
208}
209
4310f8b0
EB
210/// creates a new Transients entry or throws
211void
212Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
9a9954ba 213{
4310f8b0
EB
214 assert(e);
215 assert(e->mem_obj);
216 assert(!e->hasTransients());
9a9954ba 217
4310f8b0 218 Must(map); // configured to track transients
9a9954ba 219
4310f8b0
EB
220 sfileno index = 0;
221 Ipc::StoreMapAnchor *slot = map->openForWriting(key, index);
222 Must(slot); // no writer collisions
9a9954ba 223
4310f8b0
EB
224 slot->set(*e, key);
225 e->mem_obj->xitTable.index = index;
226 if (direction == Store::ioWriting) {
227 // keep write lock; the caller will decide what to do with it
228 map->startAppending(e->mem_obj->xitTable.index);
229 } else {
230 // keep the entry locked (for reading) to receive remote DELETE events
231 map->closeForWriting(e->mem_obj->xitTable.index);
232 }
9a9954ba
AR
233}
234
235void
ced8def3 236Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
9a9954ba
AR
237{
238 // TODO: we should probably find the entry being deleted and abort it
239}
240
4475555f 241void
4310f8b0 242Transients::status(const StoreEntry &entry, bool &aborted, bool &waitingToBeFreed) const
4475555f
AR
243{
244 assert(map);
4310f8b0
EB
245 assert(entry.hasTransients());
246 const auto idx = entry.mem_obj->xitTable.index;
247 const auto &anchor = isWriter(entry) ?
248 map->writeableEntry(idx) : map->readableEntry(idx);
249 aborted = anchor.writerHalted;
250 waitingToBeFreed = anchor.waitingToBeFreed;
4475555f
AR
251}
252
99921d9d
AR
253void
254Transients::completeWriting(const StoreEntry &e)
255{
4310f8b0
EB
256 assert(e.hasTransients());
257 assert(isWriter(e));
258 map->closeForWriting(e.mem_obj->xitTable.index, true);
259 e.mem_obj->xitTable.io = Store::ioReading;
99921d9d
AR
260}
261
d366a7fa
AR
262int
263Transients::readers(const StoreEntry &e) const
264{
4310f8b0 265 if (e.hasTransients()) {
d366a7fa
AR
266 assert(map);
267 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
268 }
269 return 0;
270}
271
1bfe9ade 272void
4310f8b0
EB
273Transients::evictCached(StoreEntry &e)
274{
275 debugs(20, 5, e);
276 if (e.hasTransients()) {
277 const auto index = e.mem_obj->xitTable.index;
278 if (map->freeEntry(index)) {
279 // Delay syncCollapsed(index) which may end `e` wait for updates.
280 // Calling it directly/here creates complex reentrant call chains.
281 CollapsedForwarding::Broadcast(e, true);
282 }
283 } // else nothing to do because e must be private
2745fea5
AR
284}
285
286void
4310f8b0 287Transients::evictIfFound(const cache_key *key)
1bfe9ade 288{
4310f8b0
EB
289 if (!map)
290 return;
291
292 const sfileno index = map->fileNoByKey(key);
293 if (map->freeEntry(index))
294 CollapsedForwarding::Broadcast(index, true);
1bfe9ade
AR
295}
296
4475555f 297void
4310f8b0 298Transients::disconnect(StoreEntry &entry)
4475555f 299{
4310f8b0
EB
300 debugs(20, 5, entry);
301 if (entry.hasTransients()) {
302 auto &xitTable = entry.mem_obj->xitTable;
99921d9d 303 assert(map);
4310f8b0
EB
304 if (isWriter(entry)) {
305 map->abortWriting(xitTable.index);
99921d9d 306 } else {
4310f8b0
EB
307 assert(isReader(entry));
308 map->closeForReading(xitTable.index);
99921d9d 309 }
4310f8b0
EB
310 locals->at(xitTable.index) = nullptr;
311 xitTable.index = -1;
312 xitTable.io = Store::ioDone;
99921d9d 313 }
4475555f
AR
314}
315
9a9954ba
AR
316/// calculates maximum number of entries we need to store and map
317int64_t
318Transients::EntryLimit()
319{
320 // TODO: we should also check whether any SMP-aware caching is configured
321 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
322 return 0; // no SMP collapsed forwarding possible or needed
323
8f7dbf74 324 return Config.collapsed_forwarding_shared_entries_limit;
9a9954ba
AR
325}
326
4310f8b0
EB
327bool
328Transients::markedForDeletion(const cache_key *key) const
329{
330 assert(map);
331 return map->markedForDeletion(key);
332}
333
334bool
335Transients::isReader(const StoreEntry &e) const
336{
337 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading;
338}
339
340bool
341Transients::isWriter(const StoreEntry &e) const
342{
343 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting;
344}
345
9a9954ba
AR
346/// initializes shared memory segment used by Transients
347class TransientsRr: public Ipc::Mem::RegisteredRunner
348{
349public:
350 /* RegisteredRunner API */
21b7990f 351 virtual void useConfig();
9a9954ba
AR
352 virtual ~TransientsRr();
353
354protected:
21b7990f 355 virtual void create();
9a9954ba
AR
356
357private:
4310f8b0 358 TransientsMap::Owner *mapOwner = nullptr;
9a9954ba
AR
359};
360
21b7990f 361RunnerRegistrationEntry(TransientsRr);
9a9954ba 362
e4d13993 363void
21b7990f 364TransientsRr::useConfig()
9a9954ba
AR
365{
366 assert(Config.memShared.configured());
21b7990f 367 Ipc::Mem::RegisteredRunner::useConfig();
9a9954ba
AR
368}
369
e4d13993 370void
21b7990f 371TransientsRr::create()
9a9954ba 372{
9a9954ba
AR
373 if (!Config.onoff.collapsed_forwarding)
374 return;
375
376 const int64_t entryLimit = Transients::EntryLimit();
9a9954ba
AR
377 if (entryLimit <= 0)
378 return; // no SMP configured or a misconfiguration
379
380 Must(!mapOwner);
381 mapOwner = TransientsMap::Init(MapLabel, entryLimit);
382}
383
384TransientsRr::~TransientsRr()
385{
386 delete mapOwner;
387}
f53969cc 388