/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
29 /// shared memory segment path to use for Transients map extras
30 static const char *ExtrasLabel
= "transients_ex";
32 Transients::Transients(): map(NULL
), locals(NULL
)
36 Transients::~Transients()
45 const int64_t entryLimit
= EntryLimit();
47 return; // no SMP support or a misconfiguration
50 map
= new TransientsMap(MapLabel
);
53 extras
= shm_old(TransientsMapExtras
)(ExtrasLabel
);
55 locals
= new Locals(entryLimit
, 0);
59 Transients::getStats(StoreInfoStats
&stats
) const
61 #if TRANSIENT_STATS_SUPPORTED
62 const size_t pageSize
= Ipc::Mem::PageSize();
64 stats
.mem
.shared
= true;
66 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
68 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
69 stats
.mem
.count
= currentCount();
74 Transients::stat(StoreEntry
&e
) const
76 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
78 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
79 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
80 currentSize() / 1024.0,
81 Math::doublePercent(currentSize(), maxSize()));
84 const int limit
= map
->entryLimit();
85 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
87 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
88 currentCount(), (100.0 * currentCount() / limit
));
94 Transients::maintain()
96 // no lazy garbage collection needed
100 Transients::minSize() const
102 return 0; // XXX: irrelevant, but Store parent forces us to implement this
106 Transients::maxSize() const
108 // Squid currently does not limit the total size of all transient objects
109 return std::numeric_limits
<uint64_t>::max();
113 Transients::currentSize() const
115 // TODO: we do not get enough information to calculate this
116 // StoreEntry should update associated stores when its size changes
121 Transients::currentCount() const
123 return map
? map
->entryCount() : 0;
127 Transients::maxObjectSize() const
129 // Squid currently does not limit the size of a transient object
130 return std::numeric_limits
<uint64_t>::max();
134 Transients::reference(StoreEntry
&)
136 // no replacement policy (but the cache(s) storing the entry may have one)
140 Transients::dereference(StoreEntry
&)
142 // no need to keep e in the global store_table for us; we have our own map
147 Transients::get(const cache_key
*key
)
153 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
157 // If we already have a local entry, the store_table should have found it.
158 // Since it did not, the local entry key must have changed from public to
159 // private. We still need to keep the private entry around for syncing as
160 // its clients depend on it, but we should not allow new clients to join.
161 if (StoreEntry
*oldE
= locals
->at(index
)) {
162 debugs(20, 3, "not joining private " << *oldE
);
163 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
164 } else if (StoreEntry
*newE
= copyFromShm(index
)) {
165 return newE
; // keep read lock to receive updates from others
168 // private entry or loading failure
169 map
->closeForReading(index
);
174 Transients::copyFromShm(const sfileno index
)
176 const TransientsMapExtras::Item
&extra
= extras
->items
[index
];
178 // create a brand new store entry and initialize it with stored info
179 StoreEntry
*e
= storeCreatePureEntry(extra
.url
, extra
.url
,
180 extra
.reqFlags
, extra
.reqMethod
);
183 e
->mem_obj
->method
= extra
.reqMethod
;
184 e
->mem_obj
->xitTable
.io
= MemObject::ioReading
;
185 e
->mem_obj
->xitTable
.index
= index
;
187 // TODO: Support collapsed revalidation for SMP-aware caches.
188 e
->setPublicKey(ksDefault
);
191 // How do we know its SMP- and not just locally-collapsed? A worker gets
192 // locally-collapsed entries from the local store_table, not Transients.
193 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
194 e
->mem_obj
->smpCollapsed
= true;
196 assert(!locals
->at(index
));
197 // We do not lock e because we do not want to prevent its destruction;
198 // e is tied to us via mem_obj so we will know when it is destructed.
199 locals
->at(index
) = e
;
204 Transients::findCollapsed(const sfileno index
)
209 if (StoreEntry
*oldE
= locals
->at(index
)) {
210 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
211 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
215 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
220 Transients::startWriting(StoreEntry
*e
, const RequestFlags
&reqFlags
,
221 const HttpRequestMethod
&reqMethod
)
225 assert(e
->mem_obj
->xitTable
.index
< 0);
228 debugs(20, 5, "No map to add " << *e
);
233 Ipc::StoreMapAnchor
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
->key
), index
);
235 debugs(20, 5, "collision registering " << *e
);
240 if (copyToShm(*e
, index
, reqFlags
, reqMethod
)) {
242 e
->mem_obj
->xitTable
.io
= MemObject::ioWriting
;
243 e
->mem_obj
->xitTable
.index
= index
;
244 map
->startAppending(index
);
245 // keep write lock -- we will be supplying others with updates
248 // fall through to the error handling code
249 } catch (const std::exception
&x
) { // TODO: should we catch ... as well?
250 debugs(20, 2, "error keeping entry " << index
<<
251 ' ' << *e
<< ": " << x
.what());
252 // fall through to the error handling code
255 map
->abortWriting(index
);
258 /// copies all relevant local data to shared memory
260 Transients::copyToShm(const StoreEntry
&e
, const sfileno index
,
261 const RequestFlags
&reqFlags
,
262 const HttpRequestMethod
&reqMethod
)
264 TransientsMapExtras::Item
&extra
= extras
->items
[index
];
266 const char *url
= e
.url();
267 const size_t urlLen
= strlen(url
);
268 Must(urlLen
< sizeof(extra
.url
)); // we have space to store it all, plus 0
269 strncpy(extra
.url
, url
, sizeof(extra
.url
));
270 extra
.url
[urlLen
] = '\0';
272 extra
.reqFlags
= reqFlags
;
274 Must(reqMethod
!= Http::METHOD_OTHER
);
275 extra
.reqMethod
= reqMethod
.id();
281 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
283 // TODO: we should probably find the entry being deleted and abort it
287 Transients::abandon(const StoreEntry
&e
)
289 assert(e
.mem_obj
&& map
);
290 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
291 CollapsedForwarding::Broadcast(e
);
292 // We do not unlock the entry now because the problem is most likely with
293 // the server resource rather than a specific cache writer, so we want to
294 // prevent other readers from collapsing requests for that resource.
298 Transients::abandoned(const StoreEntry
&e
) const
301 return abandonedAt(e
.mem_obj
->xitTable
.index
);
304 /// whether an in-transit entry at the index is now abandoned by its writer
306 Transients::abandonedAt(const sfileno index
) const
309 return map
->readableEntry(index
).waitingToBeFreed
;
313 Transients::completeWriting(const StoreEntry
&e
)
315 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
316 assert(e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
);
317 // there will be no more updates from us after this, so we must prevent
318 // future readers from joining
319 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
320 map
->closeForWriting(e
.mem_obj
->xitTable
.index
);
321 e
.mem_obj
->xitTable
.index
= -1;
322 e
.mem_obj
->xitTable
.io
= MemObject::ioDone
;
327 Transients::readers(const StoreEntry
&e
) const
329 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
331 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
337 Transients::markForUnlink(StoreEntry
&e
)
343 Transients::unlink(StoreEntry
&e
)
345 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
)
350 Transients::disconnect(MemObject
&mem_obj
)
352 if (mem_obj
.xitTable
.index
>= 0) {
354 if (mem_obj
.xitTable
.io
== MemObject::ioWriting
) {
355 map
->abortWriting(mem_obj
.xitTable
.index
);
357 assert(mem_obj
.xitTable
.io
== MemObject::ioReading
);
358 map
->closeForReading(mem_obj
.xitTable
.index
);
360 locals
->at(mem_obj
.xitTable
.index
) = NULL
;
361 mem_obj
.xitTable
.index
= -1;
362 mem_obj
.xitTable
.io
= MemObject::ioDone
;
366 /// calculates maximum number of entries we need to store and map
368 Transients::EntryLimit()
370 // TODO: we should also check whether any SMP-aware caching is configured
371 if (!UsingSmp() || !Config
.onoff
.collapsed_forwarding
)
372 return 0; // no SMP collapsed forwarding possible or needed
374 return Config
.collapsed_forwarding_shared_entries_limit
;
377 /// initializes shared memory segment used by Transients
378 class TransientsRr
: public Ipc::Mem::RegisteredRunner
381 /* RegisteredRunner API */
382 TransientsRr(): mapOwner(NULL
), extrasOwner(NULL
) {}
383 virtual void useConfig();
384 virtual ~TransientsRr();
387 virtual void create();
390 TransientsMap::Owner
*mapOwner
;
391 Ipc::Mem::Owner
<TransientsMapExtras
> *extrasOwner
;
394 RunnerRegistrationEntry(TransientsRr
);
397 TransientsRr::useConfig()
399 assert(Config
.memShared
.configured());
400 Ipc::Mem::RegisteredRunner::useConfig();
404 TransientsRr::create()
406 if (!Config
.onoff
.collapsed_forwarding
)
409 const int64_t entryLimit
= Transients::EntryLimit();
411 return; // no SMP configured or a misconfiguration
414 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
416 extrasOwner
= shm_new(TransientsMapExtras
)(ExtrasLabel
, entryLimit
);
419 TransientsRr::~TransientsRr()