/*
- * $Id: store_dir.cc,v 1.78 1998/09/18 15:14:42 wessels Exp $
+ * $Id$
*
* DEBUG: section 47 Store Directory Routines
* AUTHOR: Duane Wessels
*
- * SQUID Internet Object Cache http://squid.nlanr.net/Squid/
+ * SQUID Web Proxy Cache http://www.squid-cache.org/
* ----------------------------------------------------------
*
- * Squid is the result of efforts by numerous individuals from the
- * Internet community. Development is led by Duane Wessels of the
- * National Laboratory for Applied Network Research and funded by the
- * National Science Foundation. Squid is Copyrighted (C) 1998 by
- * Duane Wessels and the University of California San Diego. Please
- * see the COPYRIGHT file for full details. Squid incorporates
- * software developed and/or copyrighted by other sources. Please see
- * the CREDITS file for full details.
+ * Squid is the result of efforts by numerous individuals from
+ * the Internet community; see the CONTRIBUTORS file for full
+ * details. Many organizations have provided support for Squid's
+ * development; see the SPONSORS file for full details. Squid is
+ * Copyrighted (C) 2001 by the Regents of the University of
+ * California; see the COPYRIGHT file for full details. Squid
+ * incorporates software developed and/or copyrighted by other
+ * sources; see the CREDITS file for full details.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
*/
#include "squid.h"
+#include "Store.h"
+#include "MemObject.h"
+#include "SquidTime.h"
+#include "SwapDir.h"
+
+#if HAVE_STATVFS
+#if HAVE_SYS_STATVFS_H
+#include <sys/statvfs.h>
+#endif
+#endif /* HAVE_STATVFS */
+/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#if HAVE_SYS_MOUNT_H
+#include <sys/mount.h>
+#endif
+/* Windows and Linux use sys/vfs.h */
+#if HAVE_SYS_VFS_H
+#include <sys/vfs.h>
+#endif
-#define SWAP_DIR_SHIFT 24
-#define SWAP_FILE_MASK 0x00FFFFFF
-#define DefaultLevelOneDirs 16
-#define DefaultLevelTwoDirs 256
-
-static char *storeSwapSubDir(int dirn, int subdirn);
-static int storeDirSelectSwapDir(void);
-static int storeVerifyDirectory(const char *path);
-static int storeCreateDirectory(const char *path, int);
-static void storeCreateSwapSubDirs(int j);
-
-/* return full name to swapfile */
-char *
-storeSwapFullPath(int fn, char *fullpath)
-{
- LOCAL_ARRAY(char, fullfilename, SQUID_MAXPATHLEN);
- int dirn = (fn >> SWAP_DIR_SHIFT) % Config.cacheSwap.n_configured;
- int filn = fn & SWAP_FILE_MASK;
- int L1 = Config.cacheSwap.swapDirs[dirn].l1;
- int L2 = Config.cacheSwap.swapDirs[dirn].l2;
- if (!fullpath)
- fullpath = fullfilename;
- fullpath[0] = '\0';
- snprintf(fullpath, SQUID_MAXPATHLEN, "%s/%02X/%02X/%08X",
- Config.cacheSwap.swapDirs[dirn].path,
- ((filn / L2) / L2) % L1,
- (filn / L2) % L2,
- filn);
- return fullpath;
-}
-
-static char *
-storeSwapSubDir(int dirn, int subdirn)
-{
- LOCAL_ARRAY(char, fullfilename, SQUID_MAXPATHLEN);
- SwapDir *SD;
- assert(0 <= dirn && dirn < Config.cacheSwap.n_configured);
- SD = &Config.cacheSwap.swapDirs[dirn];
- assert(0 <= subdirn && subdirn < SD->l1);
- snprintf(fullfilename, SQUID_MAXPATHLEN, "%s/%02X",
- Config.cacheSwap.swapDirs[dirn].path,
- subdirn);
- return fullfilename;
-}
-
-char *
-storeSwapSubSubDir(int fn, char *fullpath)
-{
- LOCAL_ARRAY(char, fullfilename, SQUID_MAXPATHLEN);
- int dirn = (fn >> SWAP_DIR_SHIFT) % Config.cacheSwap.n_configured;
- int filn = fn & SWAP_FILE_MASK;
- int L1 = Config.cacheSwap.swapDirs[dirn].l1;
- int L2 = Config.cacheSwap.swapDirs[dirn].l2;
- if (!fullpath)
- fullpath = fullfilename;
- fullpath[0] = '\0';
- snprintf(fullpath, SQUID_MAXPATHLEN, "%s/%02X/%02X",
- Config.cacheSwap.swapDirs[dirn].path,
- ((filn / L2) / L2) % L1,
- (filn / L2) % L2);
- return fullpath;
-}
+#include "StoreHashIndex.h"
+
+static STDIRSELECT storeDirSelectSwapDirRoundRobin;
+static STDIRSELECT storeDirSelectSwapDirLeastLoad;
/*
- * Does swapfile number 'fn' belong in cachedir #F0,
- * level1 dir #F1, level2 dir #F2?
- *
- * This is called by storeDirClean(), but placed here because
- * the algorithm needs to match storeSwapSubSubDir().
- *
- * Don't check that (fn >> SWAP_DIR_SHIFT) == F0 because
- * 'fn' may not have the directory bits set.
+ * store_dirs_rebuilding is initialized to _1_ as a hack so that
+ * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
+ * cache_dirs have been read. For example, without this hack, Squid
+ * will try to write clean log files if -kparse fails (because it
+ * calls fatal()).
*/
-int
-storeFilenoBelongsHere(int fn, int F0, int F1, int F2)
-{
- int D1, D2;
- int L1, L2;
- int filn = fn & SWAP_FILE_MASK;
- assert(F0 < Config.cacheSwap.n_configured);
- L1 = Config.cacheSwap.swapDirs[F0].l1;
- L2 = Config.cacheSwap.swapDirs[F0].l2;
- D1 = ((filn / L2) / L2) % L1;
- if (F1 != D1)
- return 0;
- D2 = (filn / L2) % L2;
- if (F2 != D2)
- return 0;
- return 1;
-}
+int StoreController::store_dirs_rebuilding = 1;
-static int
-storeCreateDirectory(const char *path, int should_exist)
-{
- int created = 0;
- struct stat st;
- getCurrentTime();
- if (0 == stat(path, &st)) {
- if (S_ISDIR(st.st_mode)) {
- debug(20, should_exist ? 3 : 1) ("%s exists\n", path);
- } else {
- fatalf("Swap directory %s is not a directory.", path);
- }
- } else if (0 == mkdir(path, 0755)) {
- debug(20, should_exist ? 1 : 3) ("%s created\n", path);
- created = 1;
- } else {
- fatalf("Failed to make swap directory %s: %s",
- path, xstrerror());
- }
- return created;
-}
+StoreController::StoreController() : swapDir (new StoreHashIndex())
+{}
-static int
-storeVerifyDirectory(const char *path)
-{
- struct stat sb;
- if (stat(path, &sb) < 0) {
- debug(20, 0) ("%s: %s\n", path, xstrerror());
- return -1;
- }
- if (S_ISDIR(sb.st_mode) == 0) {
- debug(20, 0) ("%s is not a directory\n", path);
- return -1;
- }
- return 0;
-}
+StoreController::~StoreController()
+{}
/*
- * This function is called by storeInit(). If this returns < 0,
- * then Squid exits, complains about swap directories not
- * existing, and instructs the admin to run 'squid -z'
+ * This function pointer is set according to 'store_dir_select_algorithm'
+ * in squid.conf.
*/
-int
-storeVerifyCacheDirs(void)
+STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
+
+void
+StoreController::init()
{
- int i;
- int j;
- const char *path;
- for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- path = Config.cacheSwap.swapDirs[i].path;
- if (storeVerifyDirectory(path) < 0)
- return -1;
- for (j = 0; j < Config.cacheSwap.swapDirs[i].l1; j++) {
- path = storeSwapSubDir(i, j);
- if (storeVerifyDirectory(path) < 0)
- return -1;
- }
+ swapDir->init();
+
+ if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
+ storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
+ debugs(47, 1, "Using Round Robin store dir selection");
+ } else {
+ storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
+ debugs(47, 1, "Using Least Load store dir selection");
}
- return 0;
}
void
-storeCreateSwapDirectories(void)
+StoreController::createOneStore(Store &aStore)
{
- int i;
- const char *path = NULL;
- for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- path = Config.cacheSwap.swapDirs[i].path;
- debug(47, 3) ("Creating swap space in %s\n", path);
- storeCreateDirectory(path, 0);
- storeCreateSwapSubDirs(i);
- }
+ /*
+ * On Windows, fork() is not available.
+ * The following is a workaround to create store directories sequentially
+ * when running on the native Windows port.
+ */
+#ifndef _SQUID_MSWIN_
+
+ if (fork())
+ return;
+
+#endif
+
+ aStore.create();
+
+#ifndef _SQUID_MSWIN_
+
+ exit(0);
+
+#endif
}
-static void
-storeCreateSwapSubDirs(int j)
-{
- int i, k;
- int should_exist;
- SwapDir *SD = &Config.cacheSwap.swapDirs[j];
- LOCAL_ARRAY(char, name, MAXPATHLEN);
- for (i = 0; i < SD->l1; i++) {
- snprintf(name, MAXPATHLEN, "%s/%02X", SD->path, i);
- if (storeCreateDirectory(name, 0))
- should_exist = 0;
- else
- should_exist = 1;
- debug(47, 1) ("Making directories in %s\n", name);
- for (k = 0; k < SD->l2; k++) {
- snprintf(name, MAXPATHLEN, "%s/%02X/%02X", SD->path, i, k);
- storeCreateDirectory(name, should_exist);
- }
- }
+void
+StoreController::create()
+{
+ swapDir->create();
+
+#ifndef _SQUID_MSWIN_
+
+ pid_t pid;
+
+ do {
+ int status;
+#ifdef _SQUID_NEXT_
+
+ pid = wait3(&status, WNOHANG, NULL);
+#else
+
+ pid = waitpid(-1, &status, 0);
+#endif
+
+ } while (pid > 0 || (pid < 0 && errno == EINTR));
+
+#endif
}
/*
- *Spread load across least 3/4 of the store directories
+ * Determine whether the given directory can handle this object
+ * size
+ *
+ * Note: if the object size is -1, then the only swapdirs that
+ * will return true here are ones that have max_obj_size = -1,
+ * ie any-sized-object swapdirs. This is a good thing.
*/
-static int
-storeDirSelectSwapDir(void)
+bool
+SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
- double least_used = 1.0;
- int dirn;
- int i, j;
- SwapDir *SD;
- static int nleast = 0;
- static int nconf = 0;
- static int *dirq = NULL;
- static double *diru = NULL;
- /*
- * Handle simplest case of a single swap directory immediately
- */
- if (Config.cacheSwap.n_configured == 1)
- return 0;
- /*
- * Initialise dirq on the first call or on change of number of dirs
- */
- if (nconf != Config.cacheSwap.n_configured) {
- nconf = Config.cacheSwap.n_configured;
- nleast = (nconf * 3) / 4;
- safe_free(dirq);
- dirq = (int *) xmalloc(sizeof(int) * nleast);
- safe_free(diru);
- diru = (double *) xmalloc(sizeof(double) * nconf);
- for (j = 0; j < nleast; j++)
- dirq[j] = -1;
- }
/*
- * Scan for a non-negative dirn in the dirq array and return that one
+ * If the swapdir's max_obj_size is -1, then it definitely can
*/
- dirn = -1;
- for (j = 0; j < nleast; j++) {
- dirn = dirq[j];
- if (dirn < 0)
- continue;
- dirq[j] = -1;
- break;
- }
+
+ if (max_objsize == -1)
+ return true;
+
/*
- * If we found a valid dirn return it
+ * If the object size is -1, then if the storedir isn't -1 we
+ * can't store it
*/
- if (dirn >= 0)
- return dirn;
+ if ((objsize == -1) && (max_objsize != -1))
+ return false;
+
/*
- * Now for the real guts of the algorithm - building the dirq array
+ * Else, make sure that the max object size is larger than objsize
*/
- for (i = 0; i < nconf; i++) {
- diru[i] = 1.1;
- SD = &Config.cacheSwap.swapDirs[i];
- if (SD->read_only)
- continue;
- diru[i] = (double) SD->cur_size;
- diru[i] /= SD->max_size;
+ return max_objsize > objsize;
+}
+
+
+/*
+ * This new selection scheme simply does round-robin on all SwapDirs.
+ * A SwapDir is skipped if it is over the max_size (100%) limit, or
+ * overloaded.
+ */
+static int
+storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
+{
+ static int dirn = 0;
+ int i;
+ int load;
+ RefCount<SwapDir> sd;
+
+ for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
+ if (++dirn >= Config.cacheSwap.n_configured)
+ dirn = 0;
+
+ sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
+
+ if (sd->flags.read_only)
+ continue;
+
+ if (sd->cur_size > sd->max_size)
+ continue;
+
+ if (!sd->objectSizeIsAcceptable(e->objectLen()))
+ continue;
+
+ /* check for error or overload condition */
+ load = sd->canStore(*e);
+
+ if (load < 0 || load > 1000) {
+ continue;
+ }
+
+ return dirn;
}
- for (j = 0; j < nleast; j++) {
- dirq[j] = -1;
- least_used = 1.0;
- dirn = -1;
- for (i = 0; i < nconf; i++) {
- if (diru[i] < least_used) {
- least_used = diru[i];
- dirn = i;
- }
- }
- if (dirn < 0)
- break;
- dirq[j] = dirn;
- diru[dirn] = 1.1;
+
+ return -1;
+}
+
+/*
+ * Spread load across all of the store directories
+ *
+ * Note: We should modify this later on to prefer sticking objects
+ * in the *tightest fit* swapdir to conserve space, along with the
+ * actual swapdir usage. But for now, this hack will do while
+ * testing, so you should order your swapdirs in the config file
+ * from smallest maxobjsize to unlimited (-1) maxobjsize.
+ *
+ * We also have to choose nleast == nconf since we need to consider
+ * ALL swapdirs, regardless of state. Again, this is a hack while
+ * we sort out the real usefulness of this algorithm.
+ */
+static int
+storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
+{
+ ssize_t objsize;
+ ssize_t most_free = 0, cur_free;
+ ssize_t least_objsize = -1;
+ int least_load = INT_MAX;
+ int load;
+ int dirn = -1;
+ int i;
+ RefCount<SwapDir> SD;
+
+ /* Calculate the object size */
+ objsize = e->objectLen();
+
+ if (objsize != -1)
+ objsize += e->mem_obj->swap_hdr_sz;
+
+ for (i = 0; i < Config.cacheSwap.n_configured; i++) {
+ SD = dynamic_cast<SwapDir *>(INDEXSD(i));
+ SD->flags.selected = 0;
+ load = SD->canStore(*e);
+
+ if (load < 0 || load > 1000) {
+ continue;
+ }
+
+ if (!SD->objectSizeIsAcceptable(objsize))
+ continue;
+
+ if (SD->flags.read_only)
+ continue;
+
+ if (SD->cur_size > SD->max_size)
+ continue;
+
+ if (load > least_load)
+ continue;
+
+ cur_free = SD->max_size - SD->cur_size;
+
+ /* If the load is equal, then look in more details */
+ if (load == least_load) {
+ /* closest max_objsize fit */
+
+ if (least_objsize != -1)
+ if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
+ continue;
+
+ /* most free */
+ if (cur_free < most_free)
+ continue;
+ }
+
+ least_load = load;
+ least_objsize = SD->max_objsize;
+ most_free = cur_free;
+ dirn = i;
}
- /*
- * Setup default return of 0 if no least found
- */
- if (dirq[0] < 0)
- dirq[0] = 0;
- dirn = dirq[0];
- dirq[0] = -1;
+
+ if (dirn >= 0)
+ dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;
+
return dirn;
}
-int
-storeDirValidFileno(int fn)
+/*
+ * An entry written to the swap log MUST have the following
+ * properties.
+ * 1. It MUST be a public key. It does no good to log
+ * a public ADD, change the key, then log a private
+ * DEL. So we need to log a DEL before we change a
+ * key from public to private.
+ * 2. It MUST have a valid (> -1) swap_filen.
+ */
+void
+storeDirSwapLog(const StoreEntry * e, int op)
{
- int dirn = fn >> SWAP_DIR_SHIFT;
- int filn = fn & SWAP_FILE_MASK;
- if (dirn > Config.cacheSwap.n_configured)
- return 0;
- if (dirn < 0)
- return 0;
- if (filn < 0)
- return 0;
- if (filn > Config.cacheSwap.swapDirs[dirn].map->max_n_files)
- return 0;
- return 1;
+ assert (e);
+ assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
+ assert(e->swap_filen >= 0);
+ /*
+ * icons and such; don't write them to the swap log
+ */
+
+ if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
+ return;
+
+ assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
+
+ debugs(20, 3, "storeDirSwapLog: " <<
+ swap_log_op_str[op] << " " <<
+ e->getMD5Text() << " " <<
+ e->swap_dirn << " " <<
+ std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);
+
+ dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
-int
-storeDirMapBitTest(int fn)
+void
+StoreController::updateSize(int64_t size, int sign)
{
- int dirn = fn >> SWAP_DIR_SHIFT;
- int filn = fn & SWAP_FILE_MASK;
- return file_map_bit_test(Config.cacheSwap.swapDirs[dirn].map, filn);
+ fatal("StoreController has no independent size\n");
}
void
-storeDirMapBitSet(int fn)
+SwapDir::updateSize(int64_t size, int sign)
{
- int dirn = fn >> SWAP_DIR_SHIFT;
- int filn = fn & SWAP_FILE_MASK;
- file_map_bit_set(Config.cacheSwap.swapDirs[dirn].map, filn);
+ int blks = (size + fs.blksize - 1) / fs.blksize;
+ int k = (blks * fs.blksize >> 10) * sign;
+ cur_size += k;
+ store_swap_size += k;
+
+ if (sign > 0)
+ n_disk_objects++;
+ else if (sign < 0)
+ n_disk_objects--;
}
void
-storeDirMapBitReset(int fn)
+StoreController::stat(StoreEntry &output) const
{
- int dirn = fn >> SWAP_DIR_SHIFT;
- int filn = fn & SWAP_FILE_MASK;
- file_map_bit_reset(Config.cacheSwap.swapDirs[dirn].map, filn);
+ storeAppendPrintf(&output, "Store Directory Statistics:\n");
+ storeAppendPrintf(&output, "Store Entries : %lu\n",
+ (unsigned long int)StoreEntry::inUseCount());
+ storeAppendPrintf(&output, "Maximum Swap Size : %8ld KB\n",
+ (long int) maxSize());
+ storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
+ store_swap_size);
+ storeAppendPrintf(&output, "Current Capacity : %d%% used, %d%% free\n",
+ percent((int) store_swap_size, (int) maxSize()),
+ percent((int) (maxSize() - store_swap_size), (int) maxSize()));
+ /* FIXME Here we should output memory statistics */
+
+ /* now the swapDir */
+ swapDir->stat(output);
}
-int
-storeDirMapAllocate(void)
+/* if needed, this could be taught to cache the result */
+size_t
+StoreController::maxSize() const
{
- int dirn = storeDirSelectSwapDir();
- SwapDir *SD = &Config.cacheSwap.swapDirs[dirn];
- int filn = file_map_allocate(SD->map, SD->suggest);
- SD->suggest = filn + 1;
- return (dirn << SWAP_DIR_SHIFT) | (filn & SWAP_FILE_MASK);
+ /* TODO: include memory cache ? */
+ return swapDir->maxSize();
}
-char *
-storeSwapDir(int dirn)
+size_t
+StoreController::minSize() const
{
- assert(0 <= dirn && dirn < Config.cacheSwap.n_configured);
- return Config.cacheSwap.swapDirs[dirn].path;
+ /* TODO: include memory cache ? */
+ return swapDir->minSize();
}
-int
-storeDirNumber(int swap_file_number)
+void
+SwapDir::diskFull()
+{
+ if (cur_size >= max_size)
+ return;
+
+ max_size = cur_size;
+
+ debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
+}
+
+void
+storeDirOpenSwapLogs(void)
{
- return swap_file_number >> SWAP_DIR_SHIFT;
+ for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
+ dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}
-int
-storeDirProperFileno(int dirn, int fn)
+void
+storeDirCloseSwapLogs(void)
{
- return (dirn << SWAP_DIR_SHIFT) | (fn & SWAP_FILE_MASK);
+ for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
+ dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}
/*
- * An entry written to the swap log MUST have the following
- * properties.
- * 1. It MUST be a public key. It does no good to log
- * a public ADD, change the key, then log a private
- * DEL. So we need to log a DEL before we change a
- * key from public to private.
- * 2. It MUST have a valid (> -1) swap_file_number.
+ * storeDirWriteCleanLogs
+ *
+ * Writes a "clean" swap log file from in-memory metadata.
+ * This is a rewrite of the original function to troll each
+ * StoreDir and write the logs, and flush at the end of
+ * the run. Thanks goes to Eric Stern, since this solution
+ * came out of his COSS code.
*/
-void
-storeDirSwapLog(const StoreEntry * e, int op)
+int
+storeDirWriteCleanLogs(int reopen)
{
- storeSwapLogData *s;
+ const StoreEntry *e = NULL;
+ int n = 0;
+
+ struct timeval start;
+ double dt;
+ RefCount<SwapDir> sd;
int dirn;
- dirn = e->swap_file_number >> SWAP_DIR_SHIFT;
- assert(dirn < Config.cacheSwap.n_configured);
- assert(!e->flags.key_private);
- assert(e->swap_file_number >= 0);
+ int notdone = 1;
+
+ if (StoreController::store_dirs_rebuilding) {
+ debugs(20, 1, "Not currently OK to rewrite swap log.");
+ debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
+ return 0;
+ }
+
+ debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
+ getCurrentTime();
+ start = current_time;
+
+ for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
+ sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
+
+ if (sd->writeCleanStart() < 0) {
+ debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
+ continue;
+ }
+ }
+
/*
- * icons and such; don't write them to the swap log
+ * This may look inefficient as CPU wise it is more efficient to do this
+ * sequentially, but I/O wise the parallelism helps as it allows more
+ * hdd spindles to be active.
*/
- if (e->flags.entry_special)
- return;
- assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
- debug(20, 3) ("storeDirSwapLog: %s %s %08X\n",
- swap_log_op_str[op],
- storeKeyText(e->key),
- e->swap_file_number);
- s = xcalloc(1, sizeof(storeSwapLogData));
- s->op = (char) op;
- s->swap_file_number = e->swap_file_number;
- s->timestamp = e->timestamp;
- s->lastref = e->lastref;
- s->expires = e->expires;
- s->lastmod = e->lastmod;
- s->swap_file_sz = e->swap_file_sz;
- s->refcount = e->refcount;
- s->flags = e->flags;
- xmemcpy(s->key, e->key, MD5_DIGEST_CHARS);
- file_write(Config.cacheSwap.swapDirs[dirn].swaplog_fd,
- -1,
- s,
- sizeof(storeSwapLogData),
- NULL,
- NULL,
- xfree);
-}
-
-char *
-storeDirSwapLogFile(int dirn, const char *ext)
-{
- LOCAL_ARRAY(char, path, SQUID_MAXPATHLEN);
- LOCAL_ARRAY(char, digit, 32);
- if (Config.Log.swap) {
- xstrncpy(path, Config.Log.swap, SQUID_MAXPATHLEN - 64);
- strcat(path, ".");
- snprintf(digit, 32, "%02d", dirn);
- strncat(path, digit, 3);
- } else {
- xstrncpy(path, storeSwapDir(dirn), SQUID_MAXPATHLEN - 64);
- strcat(path, "/swap.state");
+ while (notdone) {
+ notdone = 0;
+
+ for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
+ sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
+
+ if (NULL == sd->cleanLog)
+ continue;
+
+ e = sd->cleanLog->nextEntry();
+
+ if (!e)
+ continue;
+
+ notdone = 1;
+
+ if (!sd->canLog(*e))
+ continue;
+
+ sd->cleanLog->write(*e);
+
+ if ((++n & 0xFFFF) == 0) {
+ getCurrentTime();
+ debugs(20, 1, " " << std::setw(7) << n <<
+ " entries written so far.");
+ }
+ }
}
- if (ext)
- strncat(path, ext, 16);
- return path;
+
+ /* Flush */
+ for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
+ dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();
+
+ if (reopen)
+ storeDirOpenSwapLogs();
+
+ getCurrentTime();
+
+ dt = tvSubDsec(start, current_time);
+
+ debugs(20, 1, " Finished. Wrote " << n << " entries.");
+ debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
+ " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
+
+
+ return n;
}
-void
-storeDirOpenSwapLogs(void)
+StoreSearch *
+StoreController::search(String const url, HttpRequest *request)
{
- int i;
- char *path;
- int fd;
- SwapDir *SD;
- for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- SD = &Config.cacheSwap.swapDirs[i];
- path = storeDirSwapLogFile(i, NULL);
- fd = file_open(path, O_WRONLY | O_CREAT, NULL, NULL, NULL);
- if (fd < 0) {
- debug(50, 1) ("%s: %s\n", path, xstrerror());
- fatal("storeDirOpenSwapLogs: Failed to open swap log.");
- }
- debug(47, 3) ("Cache Dir #%d log opened on FD %d\n", i, fd);
- SD->swaplog_fd = fd;
- }
+ /* cheat, for now you can't search the memory hot cache */
+ return swapDir->search(url, request);
+}
+
+StorePointer
+StoreHashIndex::store(int const x) const
+{
+ return INDEXSD(x);
}
void
-storeDirCloseSwapLogs(void)
+StoreController::sync(void)
{
- int i;
- SwapDir *SD;
- for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- SD = &Config.cacheSwap.swapDirs[i];
- if (SD->swaplog_fd < 0) /* not open */
- continue;
- file_close(SD->swaplog_fd);
- debug(47, 3) ("Cache Dir #%d log closed on FD %d\n", i, SD->swaplog_fd);
- SD->swaplog_fd = -1;
+ /* sync mem cache? */
+ swapDir->sync();
+}
+
+/*
+ * handle callbacks for all available fs'es
+ */
+int
+StoreController::callback()
+{
+ /* This will likely double count. That's ok. */
+ PROF_start(storeDirCallback);
+
+ /* mem cache callbacks ? */
+ int result = swapDir->callback();
+
+ PROF_stop(storeDirCallback);
+
+ return result;
+}
+
+int
+storeDirGetBlkSize(const char *path, int *blksize)
+{
+#if HAVE_STATVFS
+
+ struct statvfs sfs;
+
+ if (statvfs(path, &sfs)) {
+ debugs(50, 1, "" << path << ": " << xstrerror());
+ *blksize = 2048;
+ return 1;
}
+
+ *blksize = (int) sfs.f_frsize;
+#else
+
+ struct statfs sfs;
+
+ if (statfs(path, &sfs)) {
+ debugs(50, 1, "" << path << ": " << xstrerror());
+ *blksize = 2048;
+ return 1;
+ }
+
+ *blksize = (int) sfs.f_bsize;
+#endif
+ /*
+ * Sanity check; make sure we have a meaningful value.
+ */
+
+ if (*blksize < 512)
+ *blksize = 2048;
+
+ return 0;
}
-FILE *
-storeDirOpenTmpSwapLog(int dirn, int *clean_flag, int *zero_flag)
-{
- char *swaplog_path = xstrdup(storeDirSwapLogFile(dirn, NULL));
- char *clean_path = xstrdup(storeDirSwapLogFile(dirn, ".last-clean"));
- char *new_path = xstrdup(storeDirSwapLogFile(dirn, ".new"));
- struct stat log_sb;
- struct stat clean_sb;
- SwapDir *SD = &Config.cacheSwap.swapDirs[dirn];
- FILE *fp;
- int fd;
- if (stat(swaplog_path, &log_sb) < 0) {
- debug(47, 1) ("Cache Dir #%d: No log file\n", dirn);
- safe_free(swaplog_path);
- safe_free(clean_path);
- safe_free(new_path);
- return NULL;
+#define fsbtoblk(num, fsbs, bs) \
+ (((fsbs) != 0 && (fsbs) < (bs)) ? \
+ (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
+int
+storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
+{
+#if HAVE_STATVFS
+
+ struct statvfs sfs;
+
+ if (statvfs(path, &sfs)) {
+ debugs(50, 1, "" << path << ": " << xstrerror());
+ return 1;
}
- *zero_flag = log_sb.st_size == 0 ? 1 : 0;
- /* close the existing write-only FD */
- if (SD->swaplog_fd >= 0)
- file_close(SD->swaplog_fd);
- /* open a write-only FD for the new log */
- fd = file_open(new_path, O_WRONLY | O_CREAT | O_TRUNC, NULL, NULL, NULL);
- if (fd < 0) {
- debug(50, 1) ("%s: %s\n", new_path, xstrerror());
- fatal("storeDirOpenTmpSwapLog: Failed to open swap log.");
+
+ *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
+ *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
+ *totl_in = (int) sfs.f_files;
+ *free_in = (int) sfs.f_ffree;
+#else
+
+ struct statfs sfs;
+
+ if (statfs(path, &sfs)) {
+ debugs(50, 1, "" << path << ": " << xstrerror());
+ return 1;
}
- SD->swaplog_fd = fd;
- /* open a read-only stream of the old log */
- fp = fopen(swaplog_path, "r");
- if (fp == NULL) {
- debug(50, 0) ("%s: %s\n", swaplog_path, xstrerror());
- fatal("Failed to open swap log for reading");
+
+ *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
+ *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
+ *totl_in = (int) sfs.f_files;
+ *free_in = (int) sfs.f_ffree;
+#endif
+
+ return 0;
+}
+
+void
+allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
+{
+ if (swap->swapDirs == NULL) {
+ swap->n_allocated = 4;
+ swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
+ }
+
+ if (swap->n_allocated == swap->n_configured) {
+ StorePointer *tmp;
+ swap->n_allocated <<= 1;
+ tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
+ xmemcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
+ xfree(swap->swapDirs);
+ swap->swapDirs = tmp;
}
- memset(&clean_sb, '\0', sizeof(struct stat));
- if (stat(clean_path, &clean_sb) < 0)
- *clean_flag = 0;
- else if (clean_sb.st_mtime < log_sb.st_mtime)
- *clean_flag = 0;
- else
- *clean_flag = 1;
- safeunlink(clean_path, 1);
- safe_free(swaplog_path);
- safe_free(clean_path);
- safe_free(new_path);
- return fp;
}
void
-storeDirCloseTmpSwapLog(int dirn)
-{
- char *swaplog_path = xstrdup(storeDirSwapLogFile(dirn, NULL));
- char *new_path = xstrdup(storeDirSwapLogFile(dirn, ".new"));
- SwapDir *SD = &Config.cacheSwap.swapDirs[dirn];
- int fd;
- file_close(SD->swaplog_fd);
- if (rename(new_path, swaplog_path) < 0) {
- debug(50, 0) ("%s,%s: %s\n", new_path, swaplog_path, xstrerror());
- fatal("storeDirCloseTmpSwapLog: rename failed");
+free_cachedir(SquidConfig::_cacheSwap * swap)
+{
+ int i;
+ /* DON'T FREE THESE FOR RECONFIGURE */
+
+ if (reconfiguring)
+ return;
+
+ for (i = 0; i < swap->n_configured; i++) {
+ /* TODO XXX this lets the swapdir free resources asynchronously
+ * swap->swapDirs[i]->deactivate();
+ * but there may be such a means already.
+ * RBC 20041225
+ */
+ swap->swapDirs[i] = NULL;
}
- fd = file_open(swaplog_path, O_WRONLY | O_CREAT, NULL, NULL, NULL);
- if (fd < 0) {
- debug(50, 1) ("%s: %s\n", swaplog_path, xstrerror());
- fatal("storeDirCloseTmpSwapLog: Failed to open swap log.");
+
+ safe_free(swap->swapDirs);
+ swap->swapDirs = NULL;
+ swap->n_allocated = 0;
+ swap->n_configured = 0;
+}
+
+/* this should be a virtual method on StoreEntry,
+ * i.e. e->referenced()
+ * so that the entry can notify the creating Store
+ */
+void
+StoreController::reference(StoreEntry &e)
+{
+ /* Notify the fs that we're referencing this object again */
+
+ if (e.swap_dirn > -1)
+ e.store()->reference(e);
+
+ /* Notify the memory cache that we're referencing this object again */
+ if (e.mem_obj) {
+ if (mem_policy->Referenced)
+ mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
}
- safe_free(swaplog_path);
- safe_free(new_path);
- SD->swaplog_fd = fd;
- debug(47, 3) ("Cache Dir #%d log opened on FD %d\n", dirn, fd);
}
void
-storeDirUpdateSwapSize(int fn, size_t size, int sign)
+StoreController::dereference(StoreEntry & e)
{
- int dirn = (fn >> SWAP_DIR_SHIFT) % Config.cacheSwap.n_configured;
- int k = ((size + 1023) >> 10) * sign;
- Config.cacheSwap.swapDirs[dirn].cur_size += k;
- store_swap_size += k;
- if (sign > 0)
- n_disk_objects++;
- else if (sign < 0)
- n_disk_objects--;
+ /* Notify the fs that we're not referencing this object any more */
+
+ if (e.swap_filen > -1)
+ e.store()->dereference(e);
+
+ /* Notify the memory cache that we're not referencing this object any more */
+ if (e.mem_obj) {
+ if (mem_policy->Dereferenced)
+ mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
+ }
+}
+
+StoreEntry *
+
+StoreController::get
+(const cache_key *key)
+{
+
+ return swapDir->get
+ (key);
}
void
-storeDirStats(StoreEntry * sentry)
+
+StoreController::get
+(String const key, STOREGETCLIENT callback, void *cbdata)
{
- int i;
- SwapDir *SD;
- storeAppendPrintf(sentry, "Store Directory Statistics:\n");
- storeAppendPrintf(sentry, "Store Entries : %d\n",
- memInUse(MEM_STOREENTRY));
- storeAppendPrintf(sentry, "Maximum Swap Size : %8d KB\n",
- Config.Swap.maxSize);
- storeAppendPrintf(sentry, "Current Store Swap Size: %8d KB\n",
- store_swap_size);
- storeAppendPrintf(sentry, "Current Capacity : %d%% used, %d%% free\n",
- percent((int) store_swap_size, (int) Config.Swap.maxSize),
- percent((int) (Config.Swap.maxSize - store_swap_size), (int) Config.Swap.maxSize));
- for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- SD = &Config.cacheSwap.swapDirs[i];
- storeAppendPrintf(sentry, "\n");
- storeAppendPrintf(sentry, "Store Directory #%d: %s\n", i, SD->path);
- storeAppendPrintf(sentry, "First level subdirectories: %d\n", SD->l1);
- storeAppendPrintf(sentry, "Second level subdirectories: %d\n", SD->l2);
- storeAppendPrintf(sentry, "Maximum Size: %d KB\n", SD->max_size);
- storeAppendPrintf(sentry, "Current Size: %d KB\n", SD->cur_size);
- storeAppendPrintf(sentry, "Percent Used: %0.2f%%\n",
- 100.0 * SD->cur_size / SD->max_size);
- storeAppendPrintf(sentry, "Filemap bits in use: %d of %d (%d%%)\n",
- SD->map->n_files_in_map, SD->map->max_n_files,
- percent(SD->map->n_files_in_map, SD->map->max_n_files));
+ fatal("not implemented");
+}
+
+StoreHashIndex::StoreHashIndex()
+{
+ if (store_table)
+ abort();
+ assert (store_table == NULL);
+}
+
+StoreHashIndex::~StoreHashIndex()
+{
+ if (store_table) {
+ hashFreeItems(store_table, destroyStoreEntry);
+ hashFreeMemory(store_table);
+ store_table = NULL;
}
}
int
-storeDirMapBitsInUse(void)
+StoreHashIndex::callback()
+{
+ int result = 0;
+ int j;
+ static int ndir = 0;
+
+ do {
+ j = 0;
+
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
+ if (ndir >= Config.cacheSwap.n_configured)
+ ndir = ndir % Config.cacheSwap.n_configured;
+
+ int temp_result = store(ndir)->callback();
+
+ ++ndir;
+
+ j += temp_result;
+
+ result += temp_result;
+
+ if (j > 100)
+ fatal ("too much io\n");
+ }
+ } while (j > 0);
+
+ ndir++;
+
+ return result;
+}
+
+void
+StoreHashIndex::create()
+{
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++)
+ store(i)->create();
+}
+
+/* Lookup an object in the cache.
+ * return just a reference to object, don't start swapping in yet. */
+StoreEntry *
+
+StoreHashIndex::get
+(const cache_key *key)
+{
+ PROF_start(storeGet);
+ debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
+ StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
+ PROF_stop(storeGet);
+ return p;
+}
+
+void
+
+StoreHashIndex::get
+(String const key, STOREGETCLIENT callback, void *cbdata)
+{
+ fatal("not implemented");
+}
+
+void
+StoreHashIndex::init()
+{
+ /* Calculate size of hash table (maximum currently 64k buckets). */
+ /* this is very bogus, it's specific to any Store maintaining an
+ * in-core index, not global */
+ size_t buckets = Store::Root().maxSize() / Config.Store.avgObjectSize;
+ debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
+ " KB, estimated " << buckets << " objects");
+ buckets /= Config.Store.objectsPerBucket;
+ debugs(20, 1, "Target number of buckets: " << buckets);
+ /* ideally the full scan period should be configurable, for the
+ * moment it remains at approximately 24 hours. */
+ store_hash_buckets = storeKeyHashBuckets(buckets);
+ debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
+ debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB");
+ debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");
+
+ store_table = hash_create(storeKeyHashCmp,
+ store_hash_buckets, storeKeyHashHash);
+
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++)
+ /* this starts a search of the store dirs, loading their
+ * index. under the new Store api this should be
+ * driven by the StoreHashIndex, not by each store.
+ *
+ * That is, the HashIndex should perform a search of each dir it is
+ * indexing to do the hash insertions. The search is then able to
+ * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
+ * 'from-no-log'.
+ *
+ * Step 1: make the store rebuilds use a search internally
+ * Step 2: change the search logic to use the four modes described
+ * above
+ * Step 3: have the hash index walk the searches itself.
+ */
+ store(i)->init();
+
+}
+
+size_t
+StoreHashIndex::maxSize() const
{
int i;
- int n = 0;
+ size_t result = 0;
+
for (i = 0; i < Config.cacheSwap.n_configured; i++)
- n += Config.cacheSwap.swapDirs[i].map->n_files_in_map;
- return n;
+ result += store(i)->maxSize();
+
+ return result;
}
-/*
- * storeDirWriteCleanLogs
- *
- * Writes a "clean" swap log file from in-memory metadata.
- */
-#define CLEAN_BUF_SZ 16384
-int
-storeDirWriteCleanLogs(int reopen)
+size_t
+StoreHashIndex::minSize() const
{
- StoreEntry *e = NULL;
- int *fd;
- int n = 0;
- time_t start, stop, r;
- struct stat sb;
- char **cur;
- char **new;
- char **cln;
- int dirn;
- int N = Config.cacheSwap.n_configured;
- dlink_node *m;
- char **outbuf;
- off_t *outbufoffset;
- storeSwapLogData s;
- size_t ss = sizeof(storeSwapLogData);
- if (store_rebuilding) {
- debug(20, 1) ("Not currently OK to rewrite swap log.\n");
- debug(20, 1) ("storeDirWriteCleanLogs: Operation aborted.\n");
- return 0;
- }
- debug(20, 1) ("storeDirWriteCleanLogs: Starting...\n");
- start = squid_curtime;
- fd = xcalloc(N, sizeof(int));
- cur = xcalloc(N, sizeof(char *));
- new = xcalloc(N, sizeof(char *));
- cln = xcalloc(N, sizeof(char *));
- for (dirn = 0; dirn < N; dirn++) {
- fd[dirn] = -1;
- cur[dirn] = xstrdup(storeDirSwapLogFile(dirn, NULL));
- new[dirn] = xstrdup(storeDirSwapLogFile(dirn, ".clean"));
- cln[dirn] = xstrdup(storeDirSwapLogFile(dirn, ".last-clean"));
- unlink(new[dirn]);
- unlink(cln[dirn]);
- fd[dirn] = file_open(new[dirn],
- O_WRONLY | O_CREAT | O_TRUNC,
- NULL,
- NULL,
- NULL);
- if (fd[dirn] < 0) {
- debug(50, 0) ("storeDirWriteCleanLogs: %s: %s\n", new[dirn], xstrerror());
- continue;
- }
- debug(20, 3) ("storeDirWriteCleanLogs: opened %s, FD %d\n",
- new[dirn], fd[dirn]);
-#if HAVE_FCHMOD
- if (stat(cur[dirn], &sb) == 0)
- fchmod(fd[dirn], sb.st_mode);
-#endif
- }
- outbuf = xcalloc(N, sizeof(char *));
- outbufoffset = xcalloc(N, sizeof(*outbufoffset));
- for (dirn = 0; dirn < N; dirn++) {
- outbuf[dirn] = xcalloc(CLEAN_BUF_SZ, 1);
- outbufoffset[dirn] = 0;
- }
- for (m = store_list.tail; m; m = m->prev) {
- e = m->data;
- if (e->swap_file_number < 0)
- continue;
- if (e->swap_status != SWAPOUT_DONE)
- continue;
- if (e->swap_file_sz <= 0)
- continue;
- if (e->flags.release_request)
- continue;
- if (e->flags.key_private)
- continue;
- if (e->flags.entry_special)
- continue;
- dirn = storeDirNumber(e->swap_file_number);
- assert(dirn < N);
- if (fd[dirn] < 0)
- continue;
- memset(&s, '\0', ss);
- s.op = (char) SWAP_LOG_ADD;
- s.swap_file_number = e->swap_file_number;
- s.timestamp = e->timestamp;
- s.lastref = e->lastref;
- s.expires = e->expires;
- s.lastmod = e->lastmod;
- s.swap_file_sz = e->swap_file_sz;
- s.refcount = e->refcount;
- s.flags = e->flags;
- xmemcpy(&s.key, e->key, MD5_DIGEST_CHARS);
- xmemcpy(outbuf[dirn] + outbufoffset[dirn], &s, ss);
- outbufoffset[dirn] += ss;
- /* buffered write */
- if (outbufoffset[dirn] + ss > CLEAN_BUF_SZ) {
- if (write(fd[dirn], outbuf[dirn], outbufoffset[dirn]) < 0) {
- debug(50, 0) ("storeDirWriteCleanLogs: %s: write: %s\n",
- new[dirn], xstrerror());
- debug(20, 0) ("storeDirWriteCleanLogs: Current swap logfile not replaced.\n");
- file_close(fd[dirn]);
- fd[dirn] = -1;
- unlink(new[dirn]);
- continue;
- }
- outbufoffset[dirn] = 0;
- }
- if ((++n & 0xFFFF) == 0) {
- getCurrentTime();
- debug(20, 1) (" %7d entries written so far.\n", n);
- }
- }
- /* flush */
- for (dirn = 0; dirn < N; dirn++) {
- if (outbufoffset[dirn] == 0)
- continue;
- if (fd[dirn] < 0)
- continue;
- if (write(fd[dirn], outbuf[dirn], outbufoffset[dirn]) < 0) {
- debug(50, 0) ("storeDirWriteCleanLogs: %s: write: %s\n",
- new[dirn], xstrerror());
- debug(20, 0) ("storeDirWriteCleanLogs: Current swap logfile not replaced.\n");
- file_close(fd[dirn]);
- fd[dirn] = -1;
- unlink(new[dirn]);
- continue;
- }
- safe_free(outbuf[dirn]);
- }
- safe_free(outbuf);
- safe_free(outbufoffset);
-#ifdef _SQUID_MSWIN_
- /*
- * You can't rename open files on Microsoft "operating systems"
- * so we close before renaming.
- */
- storeDirCloseSwapLogs();
-#endif
- /* rename */
- for (dirn = 0; dirn < N; dirn++) {
- if (fd[dirn] < 0)
- continue;
- if (rename(new[dirn], cur[dirn]) < 0) {
- debug(50, 0) ("storeDirWriteCleanLogs: rename failed: %s, %s -> %s\n",
- xstrerror(), new[dirn], cur[dirn]);
- }
- }
-#ifndef _SQUID_MSWIN_
- storeDirCloseSwapLogs();
-#endif
- if (reopen)
- storeDirOpenSwapLogs();
- stop = squid_curtime;
- r = stop - start;
- debug(20, 1) (" Finished. Wrote %d entries.\n", n);
- debug(20, 1) (" Took %d seconds (%6.1f entries/sec).\n",
- r > 0 ? (int) r : 0,
- (double) n / (r > 0 ? r : 1));
- /* touch a timestamp file if we're not still validating */
- if (!store_rebuilding) {
- for (dirn = 0; dirn < N; dirn++) {
- if (fd[dirn] < 0)
- continue;
- file_close(file_open(cln[dirn],
- O_WRONLY | O_CREAT | O_TRUNC, NULL, NULL, NULL));
- }
- }
- /* close */
- for (dirn = 0; dirn < N; dirn++) {
- safe_free(cur[dirn]);
- safe_free(new[dirn]);
- safe_free(cln[dirn]);
- if (fd[dirn] < 0)
- continue;
- file_close(fd[dirn]);
- fd[dirn] = -1;
+ size_t result = 0;
+
+ for (int i = 0; i < Config.cacheSwap.n_configured; i++)
+ result += store(i)->minSize();
+
+ return result;
+}
+
+void
+StoreHashIndex::stat(StoreEntry & output) const
+{
+ int i;
+
+ /* Now go through each store, calling its stat routine */
+
+ for (i = 0; i < Config.cacheSwap.n_configured; i++) {
+ storeAppendPrintf(&output, "\n");
+ store(i)->stat(output);
}
- safe_free(cur);
- safe_free(new);
- safe_free(cln);
- safe_free(fd);
- return n;
}
-#undef CLEAN_BUF_SZ
void
-storeDirConfigure(void)
+StoreHashIndex::reference(StoreEntry&)
+{}
+
+void
+StoreHashIndex::dereference(StoreEntry&)
+{}
+
+void
+StoreHashIndex::maintain()
{
- SwapDir *SD;
- int n;
int i;
- fileMap *fm;
- Config.Swap.maxSize = 0;
+ /* walk each fs */
+
for (i = 0; i < Config.cacheSwap.n_configured; i++) {
- SD = &Config.cacheSwap.swapDirs[i];;
- Config.Swap.maxSize += SD->max_size;
- n = 2 * SD->max_size / Config.Store.avgObjectSize;
- if (NULL == SD->map) {
- /* first time */
- SD->map = file_map_create(n);
- } else if (n > SD->map->max_n_files) {
- /* it grew, need to expand */
- fm = file_map_create(n);
- filemapCopy(SD->map, fm);
- filemapFreeMemory(SD->map);
- SD->map = fm;
- }
- /* else it shrunk, and we leave the old one in place */
+ /* XXX FixMe: This should be done "in parallel" on the different
+ * cache_dirs, not one at a time.
+ */
+ /* call the maintain function .. */
+ store(i)->maintain();
}
}
void
-storeDirDiskFull(int fn)
-{
- int dirn = fn >> SWAP_DIR_SHIFT;
- SwapDir *SD = &Config.cacheSwap.swapDirs[dirn];
- assert(0 <= dirn && dirn < Config.cacheSwap.n_configured);
- SD->max_size = SD->cur_size;
- debug(20, 1) ("WARNING: Shrinking cache_dir #%d to %d KB\n",
- dirn, SD->cur_size);
+StoreHashIndex::updateSize(int64_t, int)
+{}
+
+void
+StoreHashIndex::sync()
+{
+ for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
+ store(i)->sync();
+}
+
+StoreSearch *
+StoreHashIndex::search(String const url, HttpRequest *)
+{
+ if (url.size())
+ fatal ("Cannot search by url yet\n");
+
+ return new StoreSearchHashIndex (this);
+}
+
+CBDATA_CLASS_INIT(StoreSearchHashIndex);
+
+StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
+{}
+
+/* do not link
+StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
+*/
+
+StoreSearchHashIndex::~StoreSearchHashIndex()
+{}
+
+void
+StoreSearchHashIndex::next(void (callback)(void *cbdata), void *cbdata)
+{
+ next();
+ callback (cbdata);
+}
+
+bool
+StoreSearchHashIndex::next()
+{
+ if (entries.size())
+ entries.pop_back();
+
+ while (!isDone() && !entries.size())
+ copyBucket();
+
+ return currentItem() != NULL;
+}
+
+bool
+StoreSearchHashIndex::error() const
+{
+ return false;
+}
+
+bool
+StoreSearchHashIndex::isDone() const
+{
+ return bucket >= store_hash_buckets || _done;
+}
+
+StoreEntry *
+StoreSearchHashIndex::currentItem()
+{
+ if (!entries.size())
+ return NULL;
+
+ return entries.back();
+}
+
+void
+StoreSearchHashIndex::copyBucket()
+{
+ /* probably need to lock the store entries...
+ * we copy them all to prevent races on the links. */
+ debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
+ assert (!entries.size());
+ hash_link *link_ptr = NULL;
+ hash_link *link_next = NULL;
+ link_next = hash_get_bucket(store_table, bucket);
+
+ while (NULL != (link_ptr = link_next)) {
+ link_next = link_ptr->next;
+ StoreEntry *e = (StoreEntry *) link_ptr;
+
+ entries.push_back(e);
+ }
+
+ bucket++;
+ debugs(47,3, "got entries: " << entries.size());
}