printf("major version ........ = %u\n", hdr->major_version);
printf("minor version ........ = %u\n", hdr->minor_version);
printf("indexid .............. = %u (%s)\n", hdr->indexid, unixdate2str(hdr->indexid));
- printf("file_seq ............. = %u (%s) (%d compressions)\n",
+ printf("file_seq ............. = %u (%s) (%d purges)\n",
hdr->file_seq, unixdate2str(hdr->file_seq),
hdr->file_seq - hdr->indexid);
printf("continued_record_count = %u\n", hdr->continued_record_count);
static int cmd_mailbox_cache_purge_run_box(struct mailbox_cache_cmd_context *ctx,
struct mailbox *box)
{
- if (mail_cache_compress(box->cache, (uint32_t)-1) < 0) {
+ if (mail_cache_purge(box->cache, (uint32_t)-1) < 0) {
mailbox_set_index_error(box);
doveadm_mail_failed_mailbox(&ctx->ctx, box);
return -1;
So, groups 1 and 2 could be optimally implemented by keeping things
cached only for a while. I thought a week would be good. When the cache file
- is compressed, everything older than week will be dropped.
+ is purged, everything older than a week will be dropped.
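A minimal sketch of that week-based rule, assuming a per-field last-used timestamp (the names here are hypothetical, not the actual cache decision API):

#include <stdbool.h>
#include <time.h>

#define CACHE_TEMP_KEEP_SECS (7 * 24 * 60 * 60) /* "a week would be good" */

/* Hypothetical purge-time check: drop cached data that hasn't been
   used within the last week. */
static bool cache_field_drop_at_purge(time_t last_used, time_t now)
{
	return now - last_used > CACHE_TEMP_KEEP_SECS;
}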
But how to figure out if a user is in group 3? One quite easy rule would
be to see if the client is accessing messages older than a week. But with
if (count > 0)
mail_cache_register_fields(dst, fields, count);
- /* Destination cache isn't expected to exist yet, so use compression
+ /* Destination cache isn't expected to exist yet, so use purging
to create it. Setting field_header_write_pending also guarantees
that the fields are updated even if the cache was already created
- and no compression was done. */
+ and no purging was done. */
dst->field_header_write_pending = TRUE;
- return mail_cache_compress(dst, 0);
+ return mail_cache_purge(dst, 0);
}
}
cache->last_field_header_offset = offset;
- if (next_count > cache->index->optimization_set.cache.compress_header_continue_count)
- cache->need_compress_file_seq = cache->hdr->file_seq;
+ if (next_count > cache->index->optimization_set.cache.purge_header_continue_count)
+ cache->need_purge_file_seq = cache->hdr->file_seq;
if (field_hdr_r != NULL) {
/* detect corrupted size later */
dec != MAIL_CACHE_DECISION_NO) {
/* time to drop this field. don't bother dropping
fields that have never been used. */
- cache->need_compress_file_seq = cache->hdr->file_seq;
+ cache->need_purge_file_seq = cache->hdr->file_seq;
}
names = p + 1;
if (file_field >= cache->file_fields_count) {
/* new field, have to re-read fields header to figure
- out its size. don't do this if we're compressing. */
+ out its size. don't do this if we're purging. */
if (!cache->locked) {
if (mail_cache_header_fields_read(cache) < 0)
return -1;
HASH_TABLE(char *, void *) field_name_hash; /* name -> idx */
uint32_t last_field_header_offset;
- /* 0 is no need for compression, otherwise the file sequence number
- which we want compressed. */
- uint32_t need_compress_file_seq;
+ /* 0 means no purging is needed; otherwise the file sequence
+ number that we want purged. */
+ uint32_t need_purge_file_seq;
unsigned int *file_field_map;
unsigned int file_fields_count;
bool last_lock_failed:1;
bool hdr_modified:1;
bool field_header_write_pending:1;
- bool compressing:1;
+ bool purging:1;
bool map_with_read:1;
};
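To make the new field's contract concrete, here is how the other hunks in this change use it; an illustrative sequence, not standalone code:

/* 1. request: a check decides the current file is worth purging */
cache->need_purge_file_seq = cache->hdr->file_seq;
/* 2. test: mail_cache_need_purge() returns TRUE while it's nonzero */
if (mail_cache_need_purge(cache))
	(void)mail_cache_purge(cache, cache->need_purge_file_seq);
/* 3. reset: a successful purge sets it back to 0 */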
}
static void
-mail_cache_compress_field(struct mail_cache_copy_context *ctx,
- const struct mail_cache_iterate_field *field)
+mail_cache_purge_field(struct mail_cache_copy_context *ctx,
+ const struct mail_cache_iterate_field *field)
{
struct mail_cache_field *cache_field;
enum mail_cache_decision_type dec;
}
static void
-mail_cache_compress_get_fields(struct mail_cache_copy_context *ctx,
- unsigned int used_fields_count)
+mail_cache_purge_get_fields(struct mail_cache_copy_context *ctx,
+ unsigned int used_fields_count)
{
struct mail_cache *cache = ctx->cache;
struct mail_cache_field *field;
mail_cache_lookup_iter_init(cache_view, seq, &iter);
while (mail_cache_lookup_iter_next(&iter, &field) > 0)
- mail_cache_compress_field(&ctx, &field);
+ mail_cache_purge_field(&ctx, &field);
if (ctx.buffer->used == sizeof(cache_rec) ||
ctx.buffer->used > cache->index->optimization_set.cache.record_max_size) {
hdr.record_count = record_count;
hdr.field_header_offset = mail_index_uint32_to_offset(output->offset);
- mail_cache_compress_get_fields(&ctx, used_fields_count);
+ mail_cache_purge_get_fields(&ctx, used_fields_count);
o_stream_nsend(output, ctx.buffer->data, ctx.buffer->used);
hdr.backwards_compat_used_file_size = output->offset;
}
static int
-mail_cache_compress_write(struct mail_cache *cache,
- struct mail_index_transaction *trans,
- int fd, const char *temp_path, bool *unlock)
+mail_cache_purge_write(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ int fd, const char *temp_path, bool *unlock)
{
struct stat st;
uint32_t prev_file_seq, file_seq, old_offset, max_uid;
return -1;
}
- e_debug(cache->index->event, "%s: Compressed, file_seq changed %u -> %u, "
+ e_debug(cache->index->event, "%s: Purged, file_seq changed %u -> %u, "
"size=%"PRIuUOFF_T", max_uid=%u", cache->filepath,
prev_file_seq, file_seq, file_size, max_uid);
- /* once we're sure that the compression was successful,
+ /* once we're sure that the purging was successful,
update the offsets */
mail_index_ext_reset(trans, cache->ext_id, file_seq, TRUE);
offsets = array_get(&ext_offsets, &count);
}
static int
-mail_cache_compress_has_file_changed(struct mail_cache *cache,
- uint32_t compress_file_seq)
+mail_cache_purge_has_file_changed(struct mail_cache *cache,
+ uint32_t purge_file_seq)
{
struct mail_cache_header hdr;
unsigned int i;
if (ret >= 0) {
if (ret == 0)
return 0;
- if (compress_file_seq == 0) {
+ if (purge_file_seq == 0) {
/* previously it didn't exist or it
was unusable and was just unlinked */
return 1;
}
- return hdr.file_seq != compress_file_seq ? 1 : 0;
+ return hdr.file_seq != purge_file_seq ? 1 : 0;
} else if (errno != ESTALE || i >= NFS_ESTALE_RETRY_COUNT) {
mail_cache_set_syscall_error(cache, "read()");
return -1;
}
}
-static int mail_cache_compress_locked(struct mail_cache *cache,
- uint32_t compress_file_seq,
- struct mail_index_transaction *trans,
- bool *unlock)
+static int mail_cache_purge_locked(struct mail_cache *cache,
+ uint32_t purge_file_seq,
+ struct mail_index_transaction *trans,
+ bool *unlock)
{
const char *temp_path;
int fd, ret;
- /* we've locked the cache compression now. if somebody else had just
+ /* we've locked the cache for purging now. if somebody else had just
recreated the cache, reopen the cache and return success. */
- if (compress_file_seq != (uint32_t)-1 &&
- (ret = mail_cache_compress_has_file_changed(cache, compress_file_seq)) != 0) {
+ if (purge_file_seq != (uint32_t)-1 &&
+ (ret = mail_cache_purge_has_file_changed(cache, purge_file_seq)) != 0) {
if (ret < 0)
return -1;
- /* was just compressed, forget this */
- cache->need_compress_file_seq = 0;
+ /* was just purged, forget this */
+ cache->need_purge_file_seq = 0;
if (*unlock) {
(void)mail_cache_unlock(cache);
fd = mail_index_create_tmp_file(cache->index, cache->filepath, &temp_path);
if (fd == -1)
return -1;
- if (mail_cache_compress_write(cache, trans, fd, temp_path, unlock) < 0) {
+ if (mail_cache_purge_write(cache, trans, fd, temp_path, unlock) < 0) {
i_close_fd(&fd);
i_unlink(temp_path);
return -1;
if (mail_cache_header_fields_read(cache) < 0)
return -1;
- cache->need_compress_file_seq = 0;
+ cache->need_purge_file_seq = 0;
return 0;
}
static int
-mail_cache_compress_full(struct mail_cache *cache,
- struct mail_index_transaction *trans,
- uint32_t compress_file_seq)
+mail_cache_purge_full(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq)
{
bool unlock = FALSE;
int ret;
- i_assert(!cache->compressing);
+ i_assert(!cache->purging);
i_assert(cache->index->log_sync_locked);
if (MAIL_INDEX_IS_IN_MEMORY(cache->index) || cache->index->readonly)
return 0;
- /* compression isn't very efficient with small read()s */
+ /* purging isn't very efficient with small read()s */
if (cache->map_with_read) {
cache->map_with_read = FALSE;
if (cache->read_buf != NULL)
cache->mmap_length = 0;
}
- /* .log lock already prevents other processes from compressing cache at
+ /* .log lock already prevents other processes from purging the cache at
the same time, but locking the cache file itself prevents other
processes from doing other changes to it (header changes, adding
more cached data). */
/* locking succeeded. */
unlock = TRUE;
}
- cache->compressing = TRUE;
- ret = mail_cache_compress_locked(cache, compress_file_seq, trans, &unlock);
- cache->compressing = FALSE;
+ cache->purging = TRUE;
+ ret = mail_cache_purge_locked(cache, purge_file_seq, trans, &unlock);
+ cache->purging = FALSE;
if (unlock)
mail_cache_unlock(cache);
i_assert(!cache->hdr_modified);
return ret;
}
-int mail_cache_compress_with_trans(struct mail_cache *cache,
- struct mail_index_transaction *trans,
- uint32_t compress_file_seq)
+int mail_cache_purge_with_trans(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq)
{
- return mail_cache_compress_full(cache, trans, compress_file_seq);
+ return mail_cache_purge_full(cache, trans, purge_file_seq);
}
-int mail_cache_compress(struct mail_cache *cache, uint32_t compress_file_seq)
+int mail_cache_purge(struct mail_cache *cache, uint32_t purge_file_seq)
{
struct mail_index_view *view;
struct mail_index_transaction *trans;
uoff_t file_offset;
if (mail_transaction_log_sync_lock(cache->index->log,
- "mail cache compress",
+ "mail cache purge",
&file_seq, &file_offset) < 0)
return -1;
}
MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
if (ret < 0)
;
- else if ((ret = mail_cache_compress_full(cache, trans, compress_file_seq)) < 0)
+ else if ((ret = mail_cache_purge_full(cache, trans, purge_file_seq)) < 0)
mail_index_transaction_rollback(&trans);
else {
if (mail_index_transaction_commit(&trans) < 0)
mail_index_view_close(&view);
if (lock_log) {
mail_transaction_log_sync_unlock(cache->index->log,
- "mail cache compress");
+ "mail cache purge");
}
return ret;
}
-bool mail_cache_need_compress(struct mail_cache *cache)
+bool mail_cache_need_purge(struct mail_cache *cache)
{
- return cache->need_compress_file_seq != 0 &&
+ return cache->need_purge_file_seq != 0 &&
(cache->index->flags & MAIL_INDEX_OPEN_FLAG_SAVEONLY) == 0 &&
!cache->index->readonly;
}
unsigned int records_written;
- bool tried_compression:1;
+ bool tried_purging:1;
bool decisions_refreshed:1;
bool changes:1;
};
}
static int
-mail_cache_transaction_compress(struct mail_cache_transaction_ctx *ctx)
+mail_cache_transaction_purge(struct mail_cache_transaction_ctx *ctx)
{
struct mail_cache *cache = ctx->cache;
- ctx->tried_compression = TRUE;
+ ctx->tried_purging = TRUE;
- uint32_t compress_file_seq =
+ uint32_t purge_file_seq =
MAIL_CACHE_IS_UNUSABLE(cache) ? 0 : cache->hdr->file_seq;
- int ret = mail_cache_compress(cache, compress_file_seq);
+ int ret = mail_cache_purge(cache, purge_file_seq);
/* already written cache records must be forgotten, but records in
memory can still be written to the new cache file */
mail_cache_transaction_forget_flushed(ctx);
if (ret < 0)
return -1;
- if (!ctx->tried_compression) {
- if (mail_cache_transaction_compress(ctx) < 0)
+ if (!ctx->tried_purging) {
+ if (mail_cache_transaction_purge(ctx) < 0)
return -1;
return mail_cache_transaction_lock(ctx);
} else {
}
i_assert(!MAIL_CACHE_IS_UNUSABLE(cache));
- if (!ctx->tried_compression && ctx->cache_data != NULL &&
+ if (!ctx->tried_purging && ctx->cache_data != NULL &&
cache->last_stat_size + ctx->cache_data->used > cache_max_size) {
- /* Looks like cache file is becoming too large. Try to compress
+ /* Looks like the cache file is becoming too large. Try to purge
it to free up some space. */
if (cache->hdr->continued_record_count > 0 ||
cache->hdr->deleted_record_count > 0) {
mail_cache_unlock(cache);
- (void)mail_cache_transaction_compress(ctx);
+ (void)mail_cache_transaction_purge(ctx);
return mail_cache_transaction_lock(ctx);
}
}
/* Remember that this field has been used within the transaction. Later
on we fill mail_cache_field_private.used with it. We can't rely on
- setting it here, because cache compression may run and clear it. */
+ setting it here, because cache purging may run and clear it. */
uint8_t field_idx_set = 1;
array_idx_set(&ctx->cache_field_idx_used, field_idx, &field_idx_set);
return FALSE;
case MAIL_CACHE_DECISION_TEMP:
/* add it only if it's newer than what we would drop when
- compressing */
+ purging */
if (ctx->first_new_seq == 0) {
ctx->first_new_seq =
mail_cache_get_first_new_seq(ctx->view->view);
if (cache->fd == -1) {
mail_cache_file_close(cache);
if (errno == ENOENT) {
- cache->need_compress_file_seq = 0;
+ cache->need_purge_file_seq = 0;
return 0;
}
return mail_cache_open_and_verify(cache);
}
-static void mail_cache_update_need_compress(struct mail_cache *cache)
+static void mail_cache_update_need_purge(struct mail_cache *cache)
{
const struct mail_index_cache_optimization_settings *set =
&cache->index->optimization_set.cache;
struct stat st;
unsigned int msg_count;
unsigned int records_count, cont_percentage, delete_percentage;
- bool want_compress = FALSE;
+ bool want_purge = FALSE;
if (hdr->minor_version == 0) {
- /* compress to get ourself into the new header version */
- cache->need_compress_file_seq = hdr->file_seq;
+ /* purge to get ourselves into the new header version */
+ cache->need_purge_file_seq = hdr->file_seq;
return;
}
}
cont_percentage = hdr->continued_record_count * 100 / records_count;
- if (cont_percentage >= set->compress_continued_percentage) {
- /* too many continued rows, compress */
- want_compress = TRUE;
+ if (cont_percentage >= set->purge_continued_percentage) {
+ /* too many continued rows, purge */
+ want_purge = TRUE;
}
delete_percentage = hdr->deleted_record_count * 100 /
(records_count + hdr->deleted_record_count);
- if (delete_percentage >= set->compress_delete_percentage) {
- /* too many deleted records, compress */
- want_compress = TRUE;
+ if (delete_percentage >= set->purge_delete_percentage) {
+ /* too many deleted records, purge */
+ want_purge = TRUE;
}
- if (want_compress) {
+ if (want_purge) {
if (fstat(cache->fd, &st) < 0) {
if (!ESTALE_FSTAT(errno))
mail_cache_set_syscall_error(cache, "fstat()");
return;
}
- if ((uoff_t)st.st_size >= set->compress_min_size)
- cache->need_compress_file_seq = hdr->file_seq;
+ if ((uoff_t)st.st_size >= set->purge_min_size)
+ cache->need_purge_file_seq = hdr->file_seq;
}
}
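A worked example of the two ratios above, using the default settings from further down (purge_delete_percentage = 20, purge_continued_percentage = 200) and hypothetical counts:

static void purge_threshold_example(void)
{
	/* 100 live records, 25 deleted, 250 continued rows */
	unsigned int records_count = 100;
	unsigned int deleted = 25, continued = 250;

	unsigned int cont_percentage = continued * 100 / records_count;
	/* 250 >= purge_continued_percentage (200) -> want_purge */
	unsigned int delete_percentage =
		deleted * 100 / (records_count + deleted);
	/* 20 >= purge_delete_percentage (20) -> want_purge */

	(void)cont_percentage; (void)delete_percentage;
}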
/* verify the header validity only with offset=0. this way
we won't waste time re-verifying it all the time */
if (!mail_cache_verify_header(cache, hdr)) {
- cache->need_compress_file_seq =
+ cache->need_purge_file_seq =
!MAIL_CACHE_IS_UNUSABLE(cache) &&
cache->hdr->file_seq != 0 ?
cache->hdr->file_seq : 0;
sizeof(cache->hdr_ro_copy));
cache->hdr = &cache->hdr_ro_copy;
}
- mail_cache_update_need_compress(cache);
+ mail_cache_update_need_purge(cache);
} else {
i_assert(cache->hdr != NULL);
}
mail_cache_set_syscall_error(cache, "munmap()");
} else {
if (cache->fd == -1) {
- /* unusable, waiting for compression or
+ /* unusable, waiting for purging or
index is in memory */
- i_assert(cache->need_compress_file_seq != 0 ||
+ i_assert(cache->need_purge_file_seq != 0 ||
MAIL_INDEX_IS_IN_MEMORY(cache->index));
return -1;
}
if (cache->index->log_sync_locked)
return 0;
- /* Wait for .log file lock, so we can be sure that there are no cache
- compressions going on. (Because it first recreates the cache file,
+ /* Wait for .log file lock, so we can be sure that there is no cache
+ purging going on. (Because it first recreates the cache file,
unlocks it and only then writes the changes to the index and
releases the .log lock.) To prevent deadlocks, cache file must be
locked after the .log, not before. */
if (mail_cache_verify_reset_id(cache, &reset_id))
return 1;
- /* Use locking to wait for a potential cache compressing to finish.
+ /* Use locking to wait for a potential cache purge to finish.
If that didn't work either, the cache is corrupted or lost. */
ret = mail_cache_sync_wait_index(cache, &reset_id);
if (ret == 0 && cache->fd != -1 && reset_id != 0) {
return -1;
if (ret == 0) {
/* Cache doesn't exist or it was just found to be corrupted and
- was unlinked. Cache compression will create it back. */
+ was unlinked. Cache purging will recreate it. */
return 0;
}
return ret;
}
i_assert(cache->file_lock == NULL);
- /* okay, so it was just compressed. try again. */
+ /* okay, so it was just purged. try again. */
}
if ((ret = mail_cache_sync_reset_id(cache)) <= 0) {
sizeof(cache->hdr_copy), 0) < 0)
ret = -1;
cache->hdr_ro_copy = cache->hdr_copy;
- mail_cache_update_need_compress(cache);
+ mail_cache_update_need_purge(cache);
}
mail_cache_unlock(cache);
*_view = NULL;
if (view->cache->field_header_write_pending &&
- !view->cache->compressing)
+ !view->cache->purging)
(void)mail_cache_header_fields_update(view->cache);
DLLIST_REMOVE(&view->cache->views, view);
enum mail_cache_decision_type {
/* Not needed currently */
MAIL_CACHE_DECISION_NO = 0x00,
- /* Needed only for new mails. Drop when compressing. */
+ /* Needed only for new mails. Drop when purging. */
MAIL_CACHE_DECISION_TEMP = 0x01,
/* Needed. */
MAIL_CACHE_DECISION_YES = 0x02,
mail_cache_register_get_list(struct mail_cache *cache, pool_t pool,
unsigned int *count_r);
-/* Returns TRUE if cache should be compressed. */
-bool mail_cache_need_compress(struct mail_cache *cache);
-/* Compress cache file. Offsets are updated to given transaction.
+/* Returns TRUE if cache should be purged. */
+bool mail_cache_need_purge(struct mail_cache *cache);
+/* Purge the cache file. Offsets are updated in the given transaction.
The transaction log must already be exclusively locked.
- The cache compression is done only if the current cache file's file_seq
- matches compress_file_seq. The idea is that compression isn't done if
- another process had just compressed it. 0 means the cache file is created
- only if it didn't already exist. (uint32_t)-1 means that compression is
+ The cache purging is done only if the current cache file's file_seq
+ matches purge_file_seq. The idea is that purging isn't done if
+ another process has just purged it. 0 means the cache file is created
+ only if it didn't already exist. (uint32_t)-1 means that purging is
done always regardless of file_seq. */
-int mail_cache_compress_with_trans(struct mail_cache *cache,
- struct mail_index_transaction *trans,
- uint32_t compress_file_seq);
-int mail_cache_compress(struct mail_cache *cache, uint32_t compress_file_seq);
+int mail_cache_purge_with_trans(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq);
+int mail_cache_purge(struct mail_cache *cache, uint32_t purge_file_seq);
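For reference, the three calling conventions described in the comment above map to call sites elsewhere in this change; an illustrative sketch, not new API:

/* purge only if the file we last saw is still current; a concurrent
   purge by another process turns this into a no-op */
uint32_t seq = MAIL_CACHE_IS_UNUSABLE(cache) ? 0 : cache->hdr->file_seq;
(void)mail_cache_purge(cache, seq);

/* create the cache file only if it doesn't exist yet */
(void)mail_cache_purge(cache, 0);

/* always purge, as doveadm's cache purge command does */
(void)mail_cache_purge(cache, (uint32_t)-1);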
/* Returns TRUE if there is at least something in the cache. */
bool mail_cache_exists(struct mail_cache *cache);
/* Open and read cache header. Returns 1 if ok, 0 if cache doesn't exist or it
return TRUE;
/* already synced */
- return mail_cache_need_compress(index->cache);
+ return mail_cache_need_purge(index->cache);
}
static int
/* The previously called expunged handlers will update cache's
record_count and deleted_record_count. That also has a side effect
- of updating whether cache needs to be compressed. */
- if (ret == 0 && mail_cache_need_compress(index->cache) &&
+ of updating whether cache needs to be purged. */
+ if (ret == 0 && mail_cache_need_purge(index->cache) &&
!mail_cache_transactions_have_changes(index->cache)) {
- if (mail_cache_compress(index->cache,
- index->cache->need_compress_file_seq) < 0) {
+ if (mail_cache_purge(index->cache,
+ index->cache->need_purge_file_seq) < 0) {
/* can't really do anything if it fails */
}
/* Make sure the newly committed cache record offsets are
.unaccessed_field_drop_secs = 3600 * 24 * 30,
.record_max_size = 64 * 1024,
.max_size = 1024 * 1024 * 1024,
- .compress_min_size = 32 * 1024,
- .compress_delete_percentage = 20,
- .compress_continued_percentage = 200,
- .compress_header_continue_count = 4,
+ .purge_min_size = 32 * 1024,
+ .purge_delete_percentage = 20,
+ .purge_continued_percentage = 200,
+ .purge_header_continue_count = 4,
},
};
set->cache.unaccessed_field_drop_secs;
if (set->cache.max_size != 0)
dest->cache.max_size = set->cache.max_size;
- if (set->cache.compress_min_size != 0)
- dest->cache.compress_min_size = set->cache.compress_min_size;
- if (set->cache.compress_delete_percentage != 0)
- dest->cache.compress_delete_percentage =
- set->cache.compress_delete_percentage;
- if (set->cache.compress_continued_percentage != 0)
- dest->cache.compress_continued_percentage =
- set->cache.compress_continued_percentage;
- if (set->cache.compress_header_continue_count != 0)
- dest->cache.compress_header_continue_count =
- set->cache.compress_header_continue_count;
+ if (set->cache.purge_min_size != 0)
+ dest->cache.purge_min_size = set->cache.purge_min_size;
+ if (set->cache.purge_delete_percentage != 0)
+ dest->cache.purge_delete_percentage =
+ set->cache.purge_delete_percentage;
+ if (set->cache.purge_continued_percentage != 0)
+ dest->cache.purge_continued_percentage =
+ set->cache.purge_continued_percentage;
+ if (set->cache.purge_header_continue_count != 0)
+ dest->cache.purge_header_continue_count =
+ set->cache.purge_header_continue_count;
if (set->cache.record_max_size != 0)
dest->cache.record_max_size = set->cache.record_max_size;
}
/* Maximum size for the cache file. Internally the limit is 1 GB. */
uoff_t max_size;
- /* Never compress the file if it's smaller than this */
- uoff_t compress_min_size;
- /* Compress the file when n% of records are deleted */
- unsigned int compress_delete_percentage;
- /* Compress the file when n% of rows contain continued rows.
+ /* Never purge the file if it's smaller than this */
+ uoff_t purge_min_size;
+ /* Purge the file when n% of records are deleted */
+ unsigned int purge_delete_percentage;
+ /* Purge the file when continued rows amount to n% of all rows.
For example 200% means that the record has 2 continued rows, i.e.
it exists in 3 separate segments in the cache file. */
- unsigned int compress_continued_percentage;
- /* Compress the file when we need to follow more than n next_offsets to
+ unsigned int purge_continued_percentage;
+ /* Purge the file when we need to follow more than n next_offsets to
find the latest cache header. */
- unsigned int compress_header_continue_count;
+ unsigned int purge_header_continue_count;
};
struct mail_index_optimization_settings {
*_ctx = NULL;
/* initialize cache file with the old field decisions */
- (void)mail_cache_compress_with_trans(ctx->box->cache, ctx->trans,
- (uint32_t)-1);
+ (void)mail_cache_purge_with_trans(ctx->box->cache, ctx->trans,
+ (uint32_t)-1);
index_rebuild_header(ctx, cb);
index_rebuild_box_name_header(ctx);
if (ctx->backup_index != NULL) {
.unaccessed_field_drop_secs = set->mail_cache_unaccessed_field_drop,
.record_max_size = set->mail_cache_record_max_size,
.max_size = set->mail_cache_max_size,
- .compress_min_size = set->mail_cache_compress_min_size,
- .compress_delete_percentage = set->mail_cache_compress_delete_percentage,
- .compress_continued_percentage = set->mail_cache_compress_continued_percentage,
- .compress_header_continue_count = set->mail_cache_compress_header_continue_count,
+ .purge_min_size = set->mail_cache_compress_min_size,
+ .purge_delete_percentage = set->mail_cache_compress_delete_percentage,
+ .purge_continued_percentage = set->mail_cache_compress_continued_percentage,
+ .purge_header_continue_count = set->mail_cache_compress_header_continue_count,
},
};
mail_index_set_optimization_settings(box->index, &optimization_set);