}
}
-int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic)
+int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic,
+ const char *reason)
{
int ret;
mail_index_reset_error(atomic->map->index);
return -1;
}
+ mail_index_sync_set_reason(atomic->sync_ctx, reason);
atomic->locked = TRUE;
/* reset the refresh state so that if a refresh is wanted while
   locked, it picks up the latest changes */
return ctx;
}
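
For reference, the new signature threads a human-readable reason through to
the index sync context (via mail_index_sync_set_reason() above). A minimal
caller sketch, with a hypothetical reason string, using only the API visible
in this patch:

	struct mdbox_map_atomic_context *atomic;

	atomic = mdbox_map_atomic_begin(map);
	if (mdbox_map_atomic_lock(atomic, "example operation") < 0) {
		/* locking failed - roll back the atomic context */
		(void)mdbox_map_atomic_finish(&atomic);
		return -1;
	}
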
-int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx)
+int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx,
+ const char *reason)
{
i_assert(!ctx->committed);
if (!ctx->changed)
return 0;
- if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ if (mdbox_map_atomic_lock(ctx->atomic, reason) < 0)
return -1;
if (mail_index_transaction_commit(&ctx->trans) < 0) {
}
}
if (ret == 0)
- ret = mdbox_map_transaction_commit(map_trans);
+ ret = mdbox_map_transaction_commit(map_trans, "removing file");
mdbox_map_transaction_free(&map_trans);
if (mdbox_map_atomic_finish(&atomic) < 0)
ret = -1;
return 0;
}
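
The file-removal path above is one instance of the general begin, lock/commit,
free, finish flow. A reduced sketch (error handling trimmed; the
mdbox_map_transaction_begin() signature is assumed from the header excerpt
further below):

	struct mdbox_map_atomic_context *atomic;
	struct mdbox_map_transaction_context *map_trans;
	int ret;

	atomic = mdbox_map_atomic_begin(map);
	map_trans = mdbox_map_transaction_begin(atomic, FALSE);
	/* queue map changes here, e.g. refcount updates */
	ret = mdbox_map_transaction_commit(map_trans, "removing file");
	mdbox_map_transaction_free(&map_trans);
	if (mdbox_map_atomic_finish(&atomic) < 0)
		ret = -1;
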
-static int mdbox_map_assign_file_ids(struct mdbox_map_append_context *ctx,
- bool separate_transaction)
+static int
+mdbox_map_assign_file_ids(struct mdbox_map_append_context *ctx,
+ bool separate_transaction, const char *reason)
{
struct dbox_file_append_context *const *file_appends;
unsigned int i, count;
/* start the syncing. we'll need it even if there are no file ids to
be assigned. */
- if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ if (mdbox_map_atomic_lock(ctx->atomic, reason) < 0)
return -1;
mdbox_map_get_ext_hdr(ctx->map, ctx->atomic->sync_view, &hdr);
return 0;
}
- if (mdbox_map_assign_file_ids(ctx, TRUE) < 0)
+ if (mdbox_map_assign_file_ids(ctx, TRUE, "saving - assign uids") < 0)
return -1;
/* append map records to index */
unsigned int i, j, map_uids_count, appends_count;
uint32_t uid, seq;
- if (mdbox_map_assign_file_ids(ctx, FALSE) < 0)
+ if (mdbox_map_assign_file_ids(ctx, FALSE, "purging - update uids") < 0)
return -1;
memset(&rec, 0, sizeof(rec));
offsetof(struct mail_index_header, uid_validity),
&uid_validity, sizeof(uid_validity), TRUE);
}
+ mail_index_sync_set_reason(sync_ctx, "uidvalidity initialization");
return mail_index_sync_commit(&sync_ctx);
}
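
This two-step pattern - set a reason on the sync context, then commit or roll
back while it is attached - recurs throughout the patch; in current Dovecot
sources the reason is included in the index code's lock diagnostics, such as
warnings about a transaction log lock being held for a long time. The rollback
side looks the same, as the later hunks show:

	mail_index_sync_set_reason(sync_ctx, "example reason");
	mail_index_sync_rollback(&sync_ctx);
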
same atomic context. */
struct mdbox_map_atomic_context *mdbox_map_atomic_begin(struct mdbox_map *map);
-/* Lock the map immediately. */
-int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic);
+/* Lock the map immediately. The reason string is used only for logging. */
+int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic,
+ const char *reason);
/* Returns TRUE if map is locked */
bool mdbox_map_atomic_is_locked(struct mdbox_map_atomic_context *atomic);
/* When finish() is called, rollback the changes. If data was already written
bool external);
/* Write transaction to map and leave it locked. Call _free() to update tail
offset and unlock. */
-int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx);
+int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx,
+ const char *reason);
void mdbox_map_transaction_free(struct mdbox_map_transaction_context **ctx);
int mdbox_map_update_refcount(struct mdbox_map_transaction_context *ctx,
unsigned int i, count;
int ret;
- if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ if (mdbox_map_atomic_lock(ctx->atomic, "purging check") < 0)
return -1;
msgs = array_get(msgs_arr, &count);
}
/* make sure the map gets locked */
- if (mdbox_map_atomic_lock(ctx->atomic) < 0) {
+ if (mdbox_map_atomic_lock(ctx->atomic, "saving") < 0) {
mdbox_transaction_save_rollback(_ctx);
return -1;
}
mdbox_transaction_save_rollback(_ctx);
return -1;
}
+ mail_index_sync_set_reason(ctx->sync_ctx->index_sync_ctx, "copying");
+ } else {
+ mail_index_sync_set_reason(ctx->sync_ctx->index_sync_ctx, "saving");
}
if (ctx->ctx.mail != NULL)
if (mdbox_sync_finish(&ctx->sync_ctx, TRUE) == 0) {
/* commit refcount increases for copied mails */
if (ctx->map_trans != NULL) {
- if (mdbox_map_transaction_commit(ctx->map_trans) < 0)
+ if (mdbox_map_transaction_commit(ctx->map_trans,
+ "copy refcount updates") < 0)
mdbox_map_atomic_set_failed(ctx->atomic);
}
/* flush file append writes */
rebuild_mailbox_multi(ctx, rebuild_ctx, mbox, view, trans);
index_index_rebuild_deinit(&rebuild_ctx, dbox_get_uidvalidity_next);
+ mail_index_sync_set_reason(sync_ctx, "mdbox storage rebuild");
if (mail_index_sync_commit(&sync_ctx) < 0) {
mailbox_set_index_error(box);
ret = -1;
static int rebuild_msg_mailbox_commit(struct rebuild_msg_mailbox *msg)
{
+ mail_index_sync_set_reason(msg->sync_ctx, "mdbox storage rebuild");
if (mail_index_sync_commit(&msg->sync_ctx) < 0)
return -1;
mailbox_free(&msg->box);
/* begin by locking the map, so that other processes can't try to
rebuild at the same time. */
- if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ if (mdbox_map_atomic_lock(ctx->atomic, "mdbox storage rebuild") < 0)
return -1;
/* fsck the map just in case its UIDs are broken */
log head, while tail is left behind. */
if (mdbox_map_atomic_is_locked(ctx->atomic)) {
if (ret == 0)
- ret = mdbox_map_transaction_commit(ctx->map_trans);
+ ret = mdbox_map_transaction_commit(ctx->map_trans, "mdbox syncing");
/* write changes to mailbox index */
if (ret == 0)
ret = dbox_sync_mark_expunges(ctx);
if (ret < 0)
mdbox_map_atomic_set_failed(ctx->atomic);
mdbox_map_transaction_free(&ctx->map_trans);
+ ctx->expunged_count = seq_range_count(&ctx->expunged_seqs);
array_free(&ctx->expunged_seqs);
}
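
seq_range_count() (from Dovecot's lib/seq-range-array.h) returns the total
number of sequences covered by the array, i.e. the sum of seq2 - seq1 + 1 over
all ranges; the count is captured here before the array is freed so that the
sync reason assembled below can report it. A self-contained sketch of that
behavior (helper signatures may vary slightly between Dovecot versions):

	ARRAY_TYPE(seq_range) seqs;
	unsigned int count;

	t_array_init(&seqs, 8);
	seq_range_array_add_range(&seqs, 1, 3);	/* covers seqs 1..3 */
	seq_range_array_add(&seqs, 10);		/* covers seq 10 */
	count = seq_range_count(&seqs);		/* 3 + 1 = 4 */
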
mail_index_sync_has_expunges(ctx->index_sync_ctx)) {
/* we have expunges, so we need to write to the map.
   it needs to be locked before the mailbox index. */
+ mail_index_sync_set_reason(ctx->index_sync_ctx, "mdbox expunge check");
mail_index_sync_rollback(&ctx->index_sync_ctx);
index_storage_expunging_deinit(&ctx->mbox->box);
- if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ if (mdbox_map_atomic_lock(ctx->atomic, "mdbox syncing with expunges") < 0)
return -1;
return mdbox_sync_try_begin(ctx, sync_flags);
}
{
struct mail_storage *storage = mbox->box.storage;
struct mdbox_sync_context *ctx;
+ const char *reason;
enum mail_index_sync_flags sync_flags;
int ret;
bool rebuild, storage_rebuilt = FALSE;
}
if ((ret = mdbox_sync_index(ctx)) <= 0) {
+ mail_index_sync_set_reason(ctx->index_sync_ctx,
+ ret < 0 ? "mdbox syncing failed" :
+ "mdbox syncing found corruption");
mail_index_sync_rollback(&ctx->index_sync_ctx);
index_storage_expunging_deinit(&mbox->box);
i_free_and_null(ctx);
}
index_storage_expunging_deinit(&mbox->box);
+ if (!mdbox_map_atomic_is_locked(ctx->atomic))
+ reason = "mdbox synced";
+ else {
+ /* may be 0 msgs expunged, but the count still shows that
+ the map was locked */
+ reason = t_strdup_printf("mdbox synced - %u msgs expunged",
+ ctx->expunged_count);
+ }
+ mail_index_sync_set_reason(ctx->index_sync_ctx, reason);
+
*ctx_r = ctx;
return 0;
}
enum mdbox_sync_flags flags;
ARRAY_TYPE(seq_range) expunged_seqs;
+ unsigned int expunged_count;
};
int mdbox_sync_begin(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags,