static void
dsync_cache_fields_update(const struct dsync_mailbox *local_box,
const struct dsync_mailbox *remote_box,
+ struct mailbox *box,
struct mailbox_update *update)
{
ARRAY_TYPE(mailbox_cache_field) local_sorted, remote_sorted, changes;
local_fields = array_get(&local_sorted, &local_count);
remote_fields = array_get(&remote_sorted, &remote_count);
t_array_init(&changes, local_count + remote_count);
- drop_older_timestamp = ioloop_time - MAIL_CACHE_FIELD_DROP_SECS;
+ drop_older_timestamp = ioloop_time -
+ box->index->optimization_set.cache.unaccessed_field_drop_secs;
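+ /* fields not accessed since drop_older_timestamp become candidates
+ for dropping */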
for (li = ri = 0; li < local_count || ri < remote_count; ) {
ret = li == local_count ? 1 :
}
}
- dsync_cache_fields_update(local_box, remote_box, &update);
+ dsync_cache_fields_update(local_box, remote_box, box, &update);
if (update.uid_validity == 0 &&
update.cache_updates == NULL) {
used fields */
idx_hdr = mail_index_get_header(view);
max_drop_time = idx_hdr->day_stamp == 0 ? 0 :
- idx_hdr->day_stamp - MAIL_CACHE_FIELD_DROP_SECS;
+ idx_hdr->day_stamp -
+ cache->index->optimization_set.cache.unaccessed_field_drop_secs;
orig_fields_count = cache->fields_count;
if (cache->file_fields_count == 0) {
mail_cache_compress_field(&ctx, &field);
if (ctx.buffer->used == sizeof(cache_rec) ||
- ctx.buffer->used > MAIL_CACHE_RECORD_MAX_SIZE) {
+ ctx.buffer->used > cache->index->optimization_set.cache.record_max_size) {
/* nothing cached */
ext_offset = 0;
} else {
}
cache->last_field_header_offset = offset;
- if (next_count > MAIL_CACHE_HEADER_FIELD_CONTINUE_COUNT)
+ if (next_count >
+ cache->index->optimization_set.cache.compress_header_continue_count)
cache->need_compress_file_seq = cache->hdr->file_seq;
if (field_hdr_r != NULL) {
cache->field_file_map[i] = (uint32_t)-1;
max_drop_time = cache->index->map->hdr.day_stamp == 0 ? 0 :
- cache->index->map->hdr.day_stamp - MAIL_CACHE_FIELD_DROP_SECS;
+ cache->index->map->hdr.day_stamp -
+ cache->index->optimization_set.cache.unaccessed_field_drop_secs;
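+ /* fields unused since max_drop_time aren't worth keeping cached */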
i_zero(&field);
for (i = 0; i < field_hdr->fields_count; i++) {
#define MAIL_CACHE_MAJOR_VERSION 1
#define MAIL_CACHE_MINOR_VERSION 1
-/* Drop fields that haven't been accessed for n seconds */
-#define MAIL_CACHE_FIELD_DROP_SECS (3600*24*30)
-
-/* Never compress the file if it's smaller than this */
-#define MAIL_CACHE_COMPRESS_MIN_SIZE (1024*32)
-
-/* Compress the file when n% of records are deleted */
-#define MAIL_CACHE_COMPRESS_DELETE_PERCENTAGE 20
-
-/* Compress the file when n% of rows contain continued rows.
- 200% means that there's 2 continued rows per record. */
-#define MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE 200
-
-/* Compress the file when we need to follow more than n next_offsets to find
- the latest cache header. */
-#define MAIL_CACHE_HEADER_FIELD_CONTINUE_COUNT 4
-
-/* If cache record becomes larger than this, don't add it. */
-#define MAIL_CACHE_RECORD_MAX_SIZE (64*1024)
-
#define MAIL_CACHE_LOCK_TIMEOUT 10
#define MAIL_CACHE_LOCK_CHANGE_TIMEOUT 300
size_t size;
size = mail_cache_transaction_update_last_rec_size(ctx);
- if (size > MAIL_CACHE_RECORD_MAX_SIZE) {
+ if (size > ctx->cache->index->optimization_set.cache.record_max_size) {
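+ /* record grew larger than record_max_size, so don't add it to the cache */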
buffer_set_used_size(ctx->cache_data, ctx->last_rec_pos);
return;
}
static void mail_cache_update_need_compress(struct mail_cache *cache)
{
+ const struct mail_index_cache_optimization_settings *set =
+ &cache->index->optimization_set.cache;
const struct mail_cache_header *hdr = cache->hdr;
struct stat st;
unsigned int msg_count;
}
cont_percentage = hdr->continued_record_count * 100 / records_count;
- if (cont_percentage >= MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE) {
+ if (cont_percentage >= set->compress_continued_percentage) {
/* too many continued rows, compress */
want_compress = TRUE;
}
delete_percentage = hdr->deleted_record_count * 100 /
(records_count + hdr->deleted_record_count);
- if (delete_percentage >= MAIL_CACHE_COMPRESS_DELETE_PERCENTAGE) {
+ if (delete_percentage >= set->compress_delete_percentage) {
/* too many deleted records, compress */
want_compress = TRUE;
}
mail_cache_set_syscall_error(cache, "fstat()");
return;
}
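+ /* compressing isn't worthwhile until the file reaches compress_min_size */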
- if (st.st_size >= MAIL_CACHE_COMPRESS_MIN_SIZE)
+ if ((uoff_t)st.st_size >= set->compress_min_size)
cache->need_compress_file_seq = hdr->file_seq;
}
.min_age_secs = 5 * 60,
.log2_max_age_secs = 3600 * 24 * 2,
},
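+ /* cache defaults; values match the removed MAIL_CACHE_* constants */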
+ .cache = {
+ .unaccessed_field_drop_secs = 3600 * 24 * 30,
+ .record_max_size = 64 * 1024,
+ .compress_min_size = 32 * 1024,
+ .compress_delete_percentage = 20,
+ .compress_continued_percentage = 200,
+ .compress_header_continue_count = 4,
+ },
};
struct mail_index *mail_index_alloc(const char *dir, const char *prefix)
dest->log.min_age_secs = set->log.min_age_secs;
if (set->log.log2_max_age_secs != 0)
dest->log.log2_max_age_secs = set->log.log2_max_age_secs;
+
+ /* cache */
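+ /* zero values are ignored, keeping the existing settings */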
+ if (set->cache.unaccessed_field_drop_secs != 0)
+ dest->cache.unaccessed_field_drop_secs =
+ set->cache.unaccessed_field_drop_secs;
+ if (set->cache.compress_min_size != 0)
+ dest->cache.compress_min_size = set->cache.compress_min_size;
+ if (set->cache.compress_delete_percentage != 0)
+ dest->cache.compress_delete_percentage =
+ set->cache.compress_delete_percentage;
+ if (set->cache.compress_continued_percentage != 0)
+ dest->cache.compress_continued_percentage =
+ set->cache.compress_continued_percentage;
+ if (set->cache.compress_header_continue_count != 0)
+ dest->cache.compress_header_continue_count =
+ set->cache.compress_header_continue_count;
+ if (set->cache.record_max_size != 0)
+ dest->cache.record_max_size = set->cache.record_max_size;
}
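+
+ /* Illustrative sketch (an assumption, not part of this change): a caller
+ could override just one cache setting through the public setter
+ mail_index_set_optimization_settings() and leave the rest at their
+ existing values, since zero-valued fields are ignored:
+
+ struct mail_index_optimization_settings opt = {
+ .cache = { .record_max_size = 128 * 1024 },
+ };
+ mail_index_set_optimization_settings(index, &opt);
+ */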
void mail_index_set_ext_init_data(struct mail_index *index, uint32_t ext_id,
unsigned int log2_max_age_secs;
};
+struct mail_index_cache_optimization_settings {
+ /* Drop fields that haven't been accessed for n seconds */
+ unsigned int unaccessed_field_drop_secs;
+ /* If a cache record becomes larger than this, don't add it. */
+ unsigned int record_max_size;
+
+ /* Never compress the file if it's smaller than this */
+ uoff_t compress_min_size;
+ /* Compress the file when n% of records are deleted */
+ unsigned int compress_delete_percentage;
+ /* Compress the file when the number of continued rows reaches n% of the
+ total record count. For example, 200% means there are on average 2
+ continued rows per record, i.e. a record exists in 3 separate segments
+ in the cache file. */
+ unsigned int compress_continued_percentage;
+ /* Compress the file when we need to follow more than n next_offsets to
+ find the latest cache header. */
+ unsigned int compress_header_continue_count;
+};
+
struct mail_index_optimization_settings {
struct mail_index_base_optimization_settings index;
struct mail_index_log_optimization_settings log;
+ struct mail_index_cache_optimization_settings cache;
};
struct mail_index;