#define MAX_BACKWARDS_LOOKUPS 10
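+/* a purge is forced once the refcount=0 data exceeds both of these
+   thresholds: an absolute size and a fraction of the total data */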
+#define DBOX_FORCE_PURGE_MIN_BYTES (1024*1024*10)
+#define DBOX_FORCE_PURGE_MIN_RATIO 0.5
+
struct dbox_map_transaction_context {
struct dbox_map *map;
struct mail_index_transaction *trans;
pool_unref(&pool);
}
+bool dbox_map_want_purge(struct dbox_map *map)
+{
+ const struct mail_index_header *hdr;
+ const struct dbox_mail_index_map_record *rec;
+ const uint16_t *ref16_p;
+ const void *data;
+ uoff_t ref0_size, total_size;
+ bool expunged;
+ uint32_t seq;
+
+ if (map->storage->set->dbox_purge_min_percentage >= 100) {
+ /* we never purge anything */
+ return FALSE;
+ }
+
+ ref0_size = total_size = 0;
+ hdr = mail_index_get_header(map->view);
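+ /* count the total size of all messages in the map and the size of
+    those whose refcount has dropped to zero */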
+ for (seq = 1; seq <= hdr->messages_count; seq++) {
+ mail_index_lookup_ext(map->view, seq, map->map_ext_id,
+ &data, &expunged);
+ if (data == NULL || expunged)
+ continue;
+ rec = data;
+
+ mail_index_lookup_ext(map->view, seq, map->ref_ext_id,
+ &data, &expunged);
+ if (data == NULL || expunged)
+ continue;
+ ref16_p = data;
+
+ if (*ref16_p == 0)
+ ref0_size += rec->size;
+ total_size += rec->size;
+ }
+
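+ /* purge only when both the absolute and the relative thresholds
+    are exceeded */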
+ if (ref0_size < DBOX_FORCE_PURGE_MIN_BYTES)
+ return FALSE;
+ if ((float)ref0_size / (float)total_size < DBOX_FORCE_PURGE_MIN_RATIO)
+ return FALSE;
+ return TRUE;
+}
+
const ARRAY_TYPE(seq_range) *dbox_map_get_zero_ref_files(struct dbox_map *map)
{
const struct mail_index_header *hdr;
const ARRAY_TYPE(uint32_t) *map_uids, int diff);
int dbox_map_remove_file_id(struct dbox_map *map, uint32_t file_id);
+/* Returns TRUE if enough refcount=0 data has accumulated that purging
+   should be started as soon as possible. */
+bool dbox_map_want_purge(struct dbox_map *map);
/* Return all files containing messages with zero refcount. */
const ARRAY_TYPE(seq_range) *dbox_map_get_zero_ref_files(struct dbox_map *map);
}
seq_range_array_add(&entry->expunge_seqs, 0, seq);
array_append(&entry->expunge_map_uids, &map_uid, 1);
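+ /* this expunge affects a message that's already in a storage file */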
+ if (entry->file_id != 0)
+ ctx->have_storage_expunges = TRUE;
} else {
if ((sync_rec->add_flags & DBOX_INDEX_FLAG_ALT) != 0)
entry->move_to_alt = TRUE;
}
if (ret > 0) {
+ if (ctx->have_storage_expunges) {
+ /* prevent a user from continuously saving + expunging messages
+    and using lots of disk space. but avoid doing this when a user
+    simply expunges a lot of mail for the first time. that's why the
+    calculation is done before the current sync is applied: the first
+    large expunge doesn't yet show up as refcount=0 data, so purging
+    is triggered only by the second expunge. */
+ if (dbox_map_want_purge(ctx->mbox->storage->map))
+ ctx->purge = TRUE;
+ }
+
/* now sync each file separately */
iter = hash_table_iterate_init(ctx->syncs);
while (hash_table_iterate(iter, &key, &value)) {
}
if (ctx->path != NULL)
str_free(&ctx->path);
+
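+ /* purging was requested during this sync; do it now that the
+    sync itself is finished */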
+ if (ctx->purge)
+ (void)dbox_sync_purge(&ctx->mbox->storage->storage);
i_free(ctx);
return ret;
}
pool_t pool;
struct hash_table *syncs; /* struct dbox_sync_file_entry */
+
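+ /* at least one expunged message was already in a storage file */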
+ unsigned int have_storage_expunges:1;
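+ /* purge the storage once syncing is finished */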
+ unsigned int purge:1;
};
int dbox_sync_begin(struct dbox_mailbox *mbox, enum dbox_sync_flags flags,