struct mdbox_map_append_context {
struct mdbox_map *map;
-
- struct mail_index_sync_ctx *sync_ctx;
- struct mail_index_view *sync_view;
- struct mail_index_transaction *sync_trans, *trans;
+ struct mdbox_map_atomic_context *atomic;
+ struct mail_index_transaction *trans;
ARRAY_DEFINE(file_appends, struct dbox_file_append_context *);
ARRAY_DEFINE(files, struct dbox_file *);
unsigned int files_nonappendable_count;
unsigned int failed:1;
- unsigned int committed:1;
+};
+
+struct mdbox_map_atomic_context {
+ struct mdbox_map *map;
+ struct mail_index_transaction *sync_trans;
+ struct mail_index_sync_ctx *sync_ctx;
+ struct mail_index_view *sync_view;
+
+ unsigned int map_refreshed:1;
+ unsigned int locked:1;
+ unsigned int success:1;
};
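
With this split, the index-sync handle lives in the shared atomic context and the per-transaction context merely points at it, so several transactions and appends can run under a single map lock. A minimal lifecycle sketch, modeled on the call sites changed below (error handling shortened; `map` is an already-opened struct mdbox_map):

    struct mdbox_map_atomic_context *atomic;
    struct mdbox_map_transaction_context *map_trans;
    int ret = 0;

    atomic = mdbox_map_atomic_begin(map);
    map_trans = mdbox_map_transaction_begin(atomic, FALSE);
    /* queue changes, e.g. mdbox_map_update_refcount(map_trans, map_uid, -1) */
    if (mdbox_map_transaction_commit(map_trans) < 0)
            ret = -1;
    mdbox_map_transaction_free(&map_trans);
    /* commits the map's index sync on success, rolls it back otherwise */
    if (mdbox_map_atomic_finish(&atomic) < 0)
            ret = -1;

Note that the map lock is taken lazily: mdbox_map_transaction_commit() only calls mdbox_map_atomic_lock() when the transaction actually changed something.
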
int mdbox_map_view_lookup_rec(struct mdbox_map *map,
#define MAP_STORAGE(map) (&(map)->storage->storage.storage)
struct mdbox_map_transaction_context {
- struct mdbox_map *map;
+ struct mdbox_map_atomic_context *atomic;
struct mail_index_transaction *trans;
- struct mail_index_sync_ctx *sync_ctx;
unsigned int changed:1;
- unsigned int success:1;
+ unsigned int committed:1;
};
static int mdbox_map_generate_uid_validity(struct mdbox_map *map);
return 0;
}
-struct mdbox_map_transaction_context *
-mdbox_map_transaction_begin(struct mdbox_map *map, bool external)
+struct mdbox_map_atomic_context *mdbox_map_atomic_begin(struct mdbox_map *map)
{
- struct mdbox_map_transaction_context *ctx;
- enum mail_index_transaction_flags flags =
- MAIL_INDEX_TRANSACTION_FLAG_FSYNC;
+ struct mdbox_map_atomic_context *atomic;
- if (external)
- flags |= MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL;
-
- ctx = i_new(struct mdbox_map_transaction_context, 1);
- ctx->map = map;
- if (mdbox_map_open(map) > 0 &&
- mdbox_map_refresh(map) == 0)
- ctx->trans = mail_index_transaction_begin(map->view, flags);
- return ctx;
+ atomic = i_new(struct mdbox_map_atomic_context, 1);
+ atomic->map = map;
+ return atomic;
}
static void
}
}
-int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx)
+int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic)
{
- struct mdbox_map *map = ctx->map;
- struct mail_index_view *view;
- struct mail_index_transaction *sync_trans;
int ret;
- if (!ctx->changed)
+ if (atomic->locked)
return 0;
+ if (mdbox_map_open_or_create(atomic->map) < 0)
+ return -1;
+
/* use syncing to lock the transaction log, so that we always see
log's head_offset = tail_offset */
- ret = mail_index_sync_begin(map->index, &ctx->sync_ctx,
- &view, &sync_trans, 0);
+ ret = mail_index_sync_begin(atomic->map->index, &atomic->sync_ctx,
+ &atomic->sync_view, &atomic->sync_trans, 0);
if (ret <= 0) {
i_assert(ret != 0);
- mail_storage_set_internal_error(MAP_STORAGE(map));
- mail_index_reset_error(map->index);
- mail_index_transaction_rollback(&ctx->trans);
+ mail_storage_set_internal_error(MAP_STORAGE(atomic->map));
+ mail_index_reset_error(atomic->map->index);
return -1;
}
- mdbox_map_sync_handle(map, ctx->sync_ctx);
+ atomic->locked = TRUE;
+ /* reset the refresh state, so that a refresh done while locked
+ will pick up the latest changes */
+ atomic->map_refreshed = FALSE;
+ mdbox_map_sync_handle(atomic->map, atomic->sync_ctx);
+ return 0;
+}
- if (mail_index_transaction_commit(&ctx->trans) < 0) {
- mail_storage_set_internal_error(MAP_STORAGE(map));
- mail_index_reset_error(map->index);
- return -1;
+bool mdbox_map_atomic_is_locked(struct mdbox_map_atomic_context *atomic)
+{
+ return atomic->locked;
+}
+
+void mdbox_map_atomic_set_failed(struct mdbox_map_atomic_context *atomic)
+{
+ atomic->success = FALSE;
+}
+
+int mdbox_map_atomic_finish(struct mdbox_map_atomic_context **_atomic)
+{
+ struct mdbox_map_atomic_context *atomic = *_atomic;
+ int ret = 0;
+
+ *_atomic = NULL;
+
+ if (atomic->success) {
+ if (mail_index_sync_commit(&atomic->sync_ctx) < 0) {
+ mail_storage_set_internal_error(MAP_STORAGE(atomic->map));
+ mail_index_reset_error(atomic->map->index);
+ ret = -1;
+ }
+ } else if (atomic->sync_ctx != NULL) {
+ mail_index_sync_rollback(&atomic->sync_ctx);
}
- ctx->success = TRUE;
- return 0;
+ i_free(atomic);
+ return ret;
+}
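
Since atomic->success starts unset and is raised only by the commit paths, finish() defaults to rollback. A hedged sketch of the failure-propagation pattern used by the callers in this patch, where do_map_changes() is a hypothetical placeholder for the transaction/append work:

    struct mdbox_map_atomic_context *atomic;
    int ret = 0;

    atomic = mdbox_map_atomic_begin(map);
    if (do_map_changes(atomic) < 0) {
            /* make sure finish() rolls back even if an earlier
               commit already set atomic->success */
            mdbox_map_atomic_set_failed(atomic);
            ret = -1;
    }
    if (mdbox_map_atomic_finish(&atomic) < 0)
            ret = -1;
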
+
+struct mdbox_map_transaction_context *
+mdbox_map_transaction_begin(struct mdbox_map_atomic_context *atomic,
+ bool external)
+{
+ struct mdbox_map_transaction_context *ctx;
+ enum mail_index_transaction_flags flags =
+ MAIL_INDEX_TRANSACTION_FLAG_FSYNC;
+ bool success;
+
+ if (external)
+ flags |= MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL;
+
+ ctx = i_new(struct mdbox_map_transaction_context, 1);
+ ctx->atomic = atomic;
+ if (atomic->locked && atomic->map_refreshed) {
+ /* already refreshed while locked, don't do it again */
+ success = TRUE;
+ } else {
+ success = mdbox_map_open(atomic->map) > 0 &&
+ mdbox_map_refresh(atomic->map) == 0;
+ }
+
+ if (success) {
+ atomic->map_refreshed = TRUE;
+ ctx->trans = mail_index_transaction_begin(atomic->map->view,
+ flags);
+ }
+ return ctx;
}
-void mdbox_map_transaction_set_failed(struct mdbox_map_transaction_context *ctx)
+int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx)
{
- ctx->success = FALSE;
+ i_assert(!ctx->committed);
+
+ ctx->committed = TRUE;
+ if (!ctx->changed)
+ return 0;
+
+ if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ return -1;
+
+ if (mail_index_transaction_commit(&ctx->trans) < 0) {
+ mail_storage_set_internal_error(MAP_STORAGE(ctx->atomic->map));
+ mail_index_reset_error(ctx->atomic->map->index);
+ return -1;
+ }
+ ctx->atomic->success = TRUE;
+ return 0;
}
void mdbox_map_transaction_free(struct mdbox_map_transaction_context **_ctx)
{
struct mdbox_map_transaction_context *ctx = *_ctx;
- struct mdbox_map *map = ctx->map;
*_ctx = NULL;
- if (ctx->success) {
- if (mail_index_sync_commit(&ctx->sync_ctx) < 0) {
- mail_storage_set_internal_error(MAP_STORAGE(map));
- mail_index_reset_error(map->index);
- }
- } else if (ctx->sync_ctx != NULL) {
- mail_index_sync_rollback(&ctx->sync_ctx);
- }
+
if (ctx->trans != NULL)
mail_index_transaction_rollback(&ctx->trans);
i_free(ctx);
int mdbox_map_update_refcount(struct mdbox_map_transaction_context *ctx,
uint32_t map_uid, int diff)
{
- struct mdbox_map *map = ctx->map;
+ struct mdbox_map *map = ctx->atomic->map;
const void *data;
uint32_t seq;
bool expunged;
int mdbox_map_remove_file_id(struct mdbox_map *map, uint32_t file_id)
{
+ struct mdbox_map_atomic_context *atomic;
struct mdbox_map_transaction_context *map_trans;
const struct mail_index_header *hdr;
const struct mdbox_map_mail_index_record *rec;
messages that have already been moved to other files. */
/* we need a per-file transaction, otherwise we can't refresh the map */
- map_trans = mdbox_map_transaction_begin(map, TRUE);
+ atomic = mdbox_map_atomic_begin(map);
+ map_trans = mdbox_map_transaction_begin(atomic, TRUE);
hdr = mail_index_get_header(map->view);
for (seq = 1; seq <= hdr->messages_count; seq++) {
}
}
if (ret == 0)
- (void)mdbox_map_transaction_commit(map_trans);
+ ret = mdbox_map_transaction_commit(map_trans);
mdbox_map_transaction_free(&map_trans);
+ if (mdbox_map_atomic_finish(&atomic) < 0)
+ ret = -1;
return ret;
}
struct mdbox_map_append_context *
-mdbox_map_append_begin(struct mdbox_map *map)
+mdbox_map_append_begin(struct mdbox_map_atomic_context *atomic)
{
struct mdbox_map_append_context *ctx;
ctx = i_new(struct mdbox_map_append_context, 1);
- ctx->map = map;
+ ctx->atomic = atomic;
+ ctx->map = atomic->map;
ctx->first_new_file_id = (uint32_t)-1;
i_array_init(&ctx->file_appends, 64);
i_array_init(&ctx->files, 64);
i_array_init(&ctx->appends, 128);
- if (mdbox_map_open_or_create(map) < 0)
+ if (mdbox_map_open_or_create(atomic->map) < 0)
ctx->failed = TRUE;
else {
/* refresh the map so we can try appending to the
latest files */
- (void)mdbox_map_refresh(ctx->map);
+ if (mdbox_map_refresh(atomic->map) == 0)
+ atomic->map_refreshed = TRUE;
+ else
+ ctx->failed = TRUE;
}
return ctx;
}
unsigned int i, count;
struct mdbox_map_mail_index_header hdr;
uint32_t first_file_id, file_id;
- int ret;
/* start the syncing. we'll need it even if there are no file ids to
be assigned. */
- ret = mail_index_sync_begin(ctx->map->index, &ctx->sync_ctx,
- &ctx->sync_view, &ctx->sync_trans, 0);
- if (ret <= 0) {
- i_assert(ret != 0);
- mail_storage_set_internal_error(MAP_STORAGE(ctx->map));
- mail_index_reset_error(ctx->map->index);
+ if (mdbox_map_atomic_lock(ctx->atomic) < 0)
return -1;
- }
- mdbox_map_sync_handle(ctx->map, ctx->sync_ctx);
- mdbox_map_get_ext_hdr(ctx->map, ctx->sync_view, &hdr);
+ mdbox_map_get_ext_hdr(ctx->map, ctx->atomic->sync_view, &hdr);
file_id = hdr.highest_file_id + 1;
/* assign file_ids for newly created files */
struct mdbox_file *mfile =
(struct mdbox_file *)file_appends[i]->file;
- if (dbox_file_append_flush(file_appends[i]) < 0) {
- ret = -1;
- break;
- }
+ if (dbox_file_append_flush(file_appends[i]) < 0)
+ return -1;
if (mfile->file_id == 0) {
- if (mdbox_file_assign_file_id(mfile, file_id++) < 0) {
- ret = -1;
- break;
- }
+ if (mdbox_file_assign_file_id(mfile, file_id++) < 0)
+ return -1;
}
}
- if (ret < 0) {
- mail_index_sync_rollback(&ctx->sync_ctx);
- return -1;
- }
-
ctx->trans = !separate_transaction ? NULL :
mail_index_transaction_begin(ctx->map->view,
MAIL_INDEX_TRANSACTION_FLAG_FSYNC);
if (first_file_id != file_id) {
file_id--;
mail_index_update_header_ext(ctx->trans != NULL ? ctx->trans :
- ctx->sync_trans,
+ ctx->atomic->sync_trans,
ctx->map->map_ext_id,
0, &file_id, sizeof(file_id));
}
}
/* assign map UIDs for appended records */
- hdr = mail_index_get_header(ctx->sync_view);
+ hdr = mail_index_get_header(ctx->atomic->sync_view);
t_array_init(&uids, 1);
mail_index_append_finish_uids(ctx->trans, hdr->next_uid, &uids);
range = array_idx(&uids, 0);
rec.size = appends[j].size;
j++;
- if (!mail_index_lookup_seq(ctx->sync_view, uids[i], &seq))
+ if (!mail_index_lookup_seq(ctx->atomic->sync_view,
+ uids[i], &seq))
i_unreached();
- mail_index_update_ext(ctx->sync_trans, seq,
+ mail_index_update_ext(ctx->atomic->sync_trans, seq,
ctx->map->map_ext_id, &rec, NULL);
}
seq_range_array_iter_init(&iter, expunge_map_uids); i = 0;
while (seq_range_array_iter_nth(&iter, i++, &uid)) {
- if (!mail_index_lookup_seq(ctx->sync_view, uid, &seq))
+ if (!mail_index_lookup_seq(ctx->atomic->sync_view, uid, &seq))
i_unreached();
- mail_index_expunge(ctx->sync_trans, seq);
+ mail_index_expunge(ctx->atomic->sync_trans, seq);
}
return 0;
}
int mdbox_map_append_commit(struct mdbox_map_append_context *ctx)
{
- struct mdbox_map *map = ctx->map;
struct dbox_file_append_context **file_appends;
unsigned int i, count;
if (dbox_file_append_commit(&file_appends[i]) < 0)
return -1;
}
-
- if (ctx->sync_ctx != NULL) {
- if (mail_index_sync_commit(&ctx->sync_ctx) < 0) {
- mail_storage_set_internal_error(MAP_STORAGE(map));
- mail_index_reset_error(map->index);
- return -1;
- }
- }
-
- ctx->committed = TRUE;
+ ctx->atomic->success = TRUE;
return 0;
}
if (ctx->trans != NULL)
mail_index_transaction_rollback(&ctx->trans);
- if (ctx->sync_ctx != NULL)
- mail_index_sync_rollback(&ctx->sync_ctx);
file_appends = array_get_modifiable(&ctx->file_appends, &count);
for (i = 0; i < count; i++) {
int mdbox_map_get_file_msgs(struct mdbox_map *map, uint32_t file_id,
ARRAY_TYPE(mdbox_map_file_msg) *recs);
+/* Begin a new atomic context. There can be multiple transactions/appends
+ within the same atomic context. */
+struct mdbox_map_atomic_context *mdbox_map_atomic_begin(struct mdbox_map *map);
+/* Lock the map immediately. */
+int mdbox_map_atomic_lock(struct mdbox_map_atomic_context *atomic);
+/* Returns TRUE if the map is currently locked. */
+bool mdbox_map_atomic_is_locked(struct mdbox_map_atomic_context *atomic);
+/* When finish() is called, roll back the changes. If data was already written
+ to the map's transaction log, this desyncs the map and causes a rebuild. */
+void mdbox_map_atomic_set_failed(struct mdbox_map_atomic_context *atomic);
+/* Commit or roll back the changes made within this atomic context. */
+int mdbox_map_atomic_finish(struct mdbox_map_atomic_context **atomic);
+
struct mdbox_map_transaction_context *
-mdbox_map_transaction_begin(struct mdbox_map *map, bool external);
+mdbox_map_transaction_begin(struct mdbox_map_atomic_context *atomic,
+ bool external);
-/* Write transaction to map and leave it locked. Call _free() to update tail
-   offset and unlock. */
+/* Write the transaction to the map and leave the map locked. The tail offset
+   is updated and the map unlocked by mdbox_map_atomic_finish(). */
int mdbox_map_transaction_commit(struct mdbox_map_transaction_context *ctx);
-void mdbox_map_transaction_set_failed(struct mdbox_map_transaction_context *ctx);
void mdbox_map_transaction_free(struct mdbox_map_transaction_context **ctx);
int mdbox_map_update_refcount(struct mdbox_map_transaction_context *ctx,
ARRAY_TYPE(seq_range) *file_ids_r);
struct mdbox_map_append_context *
-mdbox_map_append_begin(struct mdbox_map *map);
+mdbox_map_append_begin(struct mdbox_map_atomic_context *atomic);
/* Request file for saving a new message with given size (if available). If an
existing file can be used, the record is locked and updated in index.
Returns 0 if ok, -1 if error. */
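
Appends follow the same shape, with the atomic context created first. A minimal sketch based on the purge and save call sites in this patch (file selection and message writing elided; `storage->map` as in those call sites):

    struct mdbox_map_atomic_context *atomic;
    struct mdbox_map_append_context *append_ctx;
    int ret = 0;

    atomic = mdbox_map_atomic_begin(storage->map);
    append_ctx = mdbox_map_append_begin(atomic);
    /* ... request files and write message data ... */
    if (mdbox_map_append_commit(append_ctx) < 0) {
            mdbox_map_atomic_set_failed(atomic);
            ret = -1;
    }
    mdbox_map_append_free(&append_ctx);
    if (mdbox_map_atomic_finish(&atomic) < 0)
            ret = -1;
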
struct hash_table *altmoves;
bool have_altmoves;
+ struct mdbox_map_atomic_context *atomic;
struct mdbox_map_append_context *append_ctx;
};
off_t ret;
int read_errno;
- if (ctx->append_ctx == NULL)
- ctx->append_ctx = mdbox_map_append_begin(ctx->storage->map);
+ if (ctx->append_ctx == NULL) {
+ ctx->atomic = mdbox_map_atomic_begin(ctx->storage->map);
+ ctx->append_ctx = mdbox_map_append_begin(ctx->atomic);
+ }
append_flags = !mdbox_purge_want_altpath(ctx, msg->map_uid) ? 0 :
DBOX_MAP_APPEND_FLAG_ALT;
}
if (ret > 0)
(void)dbox_file_unlink(file);
- if (ctx->append_ctx != NULL)
+ if (ctx->append_ctx != NULL) {
mdbox_map_append_free(&ctx->append_ctx);
+ if (mdbox_map_atomic_finish(&ctx->atomic) < 0)
+ ret = -1;
+ }
if (ret < 0)
dbox_file_unlock(file);
array_free(&copied_map_uids);
struct mdbox_map_append_context *append_ctx;
ARRAY_TYPE(uint32_t) copy_map_uids;
+ struct mdbox_map_atomic_context *atomic;
struct mdbox_map_transaction_context *map_trans;
ARRAY_DEFINE(mails, struct dbox_save_mail);
ctx->ctx.ctx.transaction = t;
ctx->ctx.trans = t->itrans;
ctx->mbox = mbox;
- ctx->append_ctx = mdbox_map_append_begin(mbox->storage->map);
+ ctx->atomic = mdbox_map_atomic_begin(mbox->storage->map);
+ ctx->append_ctx = mdbox_map_append_begin(ctx->atomic);
i_array_init(&ctx->mails, 32);
t->save_ctx = &ctx->ctx.ctx;
return t->save_ctx;
i_assert(ctx->ctx.finished);
- /* lock the mailbox before map to avoid deadlocks */
- if (mdbox_sync_begin(mbox, MDBOX_SYNC_FLAG_NO_PURGE |
- MDBOX_SYNC_FLAG_FORCE |
- MDBOX_SYNC_FLAG_FSYNC, &ctx->sync_ctx) < 0) {
- mdbox_transaction_save_rollback(_ctx);
- return -1;
- }
-
/* assign map UIDs for newly saved messages. they're written to
transaction log immediately within this function, but the map
is left locked. */
return -1;
}
+ /* map is now locked. lock the mailbox after it to avoid deadlocks. */
+ if (mdbox_sync_begin(mbox, MDBOX_SYNC_FLAG_NO_PURGE |
+ MDBOX_SYNC_FLAG_FORCE |
+ MDBOX_SYNC_FLAG_FSYNC, ctx->atomic,
+ &ctx->sync_ctx) < 0) {
+ mdbox_transaction_save_rollback(_ctx);
+ return -1;
+ }
+
/* assign UIDs for new messages */
hdr = mail_index_get_header(ctx->sync_ctx->sync_view);
mail_index_append_finish_uids(ctx->ctx.trans, hdr->next_uid,
/* increase map's refcount for copied mails */
if (array_is_created(&ctx->copy_map_uids)) {
- ctx->map_trans =
- mdbox_map_transaction_begin(mbox->storage->map, FALSE);
+ ctx->map_trans = mdbox_map_transaction_begin(ctx->atomic, FALSE);
if (mdbox_map_update_refcounts(ctx->map_trans,
&ctx->copy_map_uids, 1) < 0) {
mdbox_transaction_save_rollback(_ctx);
/* finish writing the mailbox APPENDs */
if (mdbox_sync_finish(&ctx->sync_ctx, TRUE) == 0) {
- if (ctx->map_trans != NULL)
- (void)mdbox_map_transaction_commit(ctx->map_trans);
- /* commit only updates the sync tail offset, everything else
- was already written at this point. */
- (void)mdbox_map_append_commit(ctx->append_ctx);
+ /* commit refcount increases for copied mails */
+ if (ctx->map_trans != NULL) {
+ if (mdbox_map_transaction_commit(ctx->map_trans) < 0)
+ mdbox_map_atomic_set_failed(ctx->atomic);
+ }
+ /* flush file append writes */
+ if (mdbox_map_append_commit(ctx->append_ctx) < 0)
+ mdbox_map_atomic_set_failed(ctx->atomic);
}
mdbox_map_append_free(&ctx->append_ctx);
+ /* update the sync tail offset, everything else
+ was already written at this point. */
+ (void)mdbox_map_atomic_finish(&ctx->atomic);
if (storage->set->parsed_fsync_mode != FSYNC_MODE_NEVER) {
if (fdatasync_path(ctx->mbox->box.path) < 0) {
mdbox_map_append_free(&ctx->append_ctx);
if (ctx->map_trans != NULL)
mdbox_map_transaction_free(&ctx->map_trans);
+ if (ctx->atomic != NULL)
+ (void)mdbox_map_atomic_finish(&ctx->atomic);
if (array_is_created(&ctx->copy_map_uids))
array_free(&ctx->copy_map_uids);
struct mdbox_storage_rebuild_context {
struct mdbox_storage *storage;
+ struct mdbox_map_atomic_context *atomic;
pool_t pool;
struct mdbox_map_mail_index_header orig_map_hdr;
uint32_t highest_file_id;
struct mailbox_list *default_list;
- struct mail_index_sync_ctx *sync_ctx;
- struct mail_index_view *sync_view;
- struct mail_index_transaction *trans;
struct rebuild_msg_mailbox prev_msg;
};
}
static struct mdbox_storage_rebuild_context *
-mdbox_storage_rebuild_init(struct mdbox_storage *storage)
+mdbox_storage_rebuild_init(struct mdbox_storage *storage,
+ struct mdbox_map_atomic_context *atomic)
{
struct mdbox_storage_rebuild_context *ctx;
ctx = i_new(struct mdbox_storage_rebuild_context, 1);
ctx->storage = storage;
+ ctx->atomic = atomic;
ctx->pool = pool_alloconly_create("dbox map rebuild", 1024*256);
ctx->guid_hash = hash_table_create(default_pool, ctx->pool, 0,
guid_hash, guid_cmp);
i_assert(ctx->storage->rebuilding_storage);
ctx->storage->rebuilding_storage = FALSE;
- if (ctx->sync_ctx != NULL)
- mail_index_sync_rollback(&ctx->sync_ctx);
hash_table_destroy(&ctx->guid_hash);
pool_unref(&ctx->pool);
rec.size = msgs[i]->size;
msgs[i]->map_uid = next_uid++;
- mail_index_append(ctx->trans, msgs[i]->map_uid, &seq);
- mail_index_update_ext(ctx->trans, seq,
+ mail_index_append(ctx->atomic->sync_trans,
+ msgs[i]->map_uid, &seq);
+ mail_index_update_ext(ctx->atomic->sync_trans, seq,
ctx->storage->map->map_ext_id,
&rec, NULL);
}
array_sort(&ctx->msgs, mdbox_rebuild_msg_offset_cmp);
msgs = array_get_modifiable(&ctx->msgs, &count);
- hdr = mail_index_get_header(ctx->sync_view);
+ hdr = mail_index_get_header(ctx->atomic->sync_view);
for (seq = 1; seq <= hdr->messages_count; seq++) {
- if (mdbox_map_view_lookup_rec(map, ctx->sync_view,
+ if (mdbox_map_view_lookup_rec(map, ctx->atomic->sync_view,
seq, &rec) < 0)
return -1;
if (pos == NULL || (*pos)->map_uid != 0) {
/* map record points to non-existing or
a duplicate message. */
- mail_index_expunge(ctx->trans, seq);
+ mail_index_expunge(ctx->atomic->sync_trans, seq);
} else {
(*pos)->map_uid = rec.map_uid;
if (rec.refcount == 0)
/* update refcounts for existing map records */
msgs = array_get_modifiable(&ctx->msgs, &count);
- hdr = mail_index_get_header(ctx->sync_view);
+ hdr = mail_index_get_header(ctx->atomic->sync_view);
for (seq = 1, i = 0; seq <= hdr->messages_count && i < count; seq++) {
- mail_index_lookup_uid(ctx->sync_view, seq, &map_uid);
+ mail_index_lookup_uid(ctx->atomic->sync_view, seq, &map_uid);
if (map_uid != msgs[i]->map_uid) {
/* we've already expunged this map record */
i_assert(map_uid < msgs[i]->map_uid);
continue;
}
- mail_index_lookup_ext(ctx->sync_view, seq,
+ mail_index_lookup_ext(ctx->atomic->sync_view, seq,
ctx->storage->map->ref_ext_id,
&data, &expunged);
ref16_p = data;
if (ref16_p == NULL || *ref16_p != msgs[i]->refcount) {
- mail_index_update_ext(ctx->trans, seq,
+ mail_index_update_ext(ctx->atomic->sync_trans, seq,
ctx->storage->map->ref_ext_id,
&msgs[i]->refcount, NULL);
}
/* update refcounts for newly created map records */
for (; i < count; i++, seq++) {
- mail_index_update_ext(ctx->trans, seq,
+ mail_index_update_ext(ctx->atomic->sync_trans, seq,
ctx->storage->map->ref_ext_id,
&msgs[i]->refcount, NULL);
}
map_hdr.highest_file_id = ctx->highest_file_id;
map_hdr.rebuild_count = ++ctx->rebuild_count;
- mail_index_update_header_ext(ctx->trans, ctx->storage->map->map_ext_id,
+ mail_index_update_header_ext(ctx->atomic->sync_trans,
+ ctx->storage->map->map_ext_id,
0, &map_hdr, sizeof(map_hdr));
return 0;
}
{
const void *data;
size_t data_size;
- int ret = 0;
if (mdbox_map_open_or_create(ctx->storage->map) < 0)
return -1;
/* begin by locking the map, so that other processes can't try to
rebuild at the same time. */
- ret = mail_index_sync_begin(ctx->storage->map->index, &ctx->sync_ctx,
- &ctx->sync_view, &ctx->trans, 0);
- if (ret <= 0) {
- i_assert(ret != 0);
- mail_storage_set_internal_error(&ctx->storage->storage.storage);
- mail_index_reset_error(ctx->storage->map->index);
+ if (mdbox_map_atomic_lock(ctx->atomic) < 0)
return -1;
- }
/* get old map header */
- mail_index_get_header_ext(ctx->sync_view, ctx->storage->map->map_ext_id,
+ mail_index_get_header_ext(ctx->atomic->sync_view,
+ ctx->storage->map->map_ext_id,
&data, &data_size);
memset(&ctx->orig_map_hdr, 0, sizeof(ctx->orig_map_hdr));
memcpy(&ctx->orig_map_hdr, data,
if (rebuild_apply_map(ctx) < 0 ||
rebuild_mailboxes(ctx) < 0 ||
- rebuild_finish(ctx) < 0 ||
- mail_index_sync_commit(&ctx->sync_ctx) < 0)
+ rebuild_finish(ctx) < 0) {
+ mdbox_map_atomic_set_failed(ctx->atomic);
return -1;
+ }
return 0;
}
-int mdbox_storage_rebuild(struct mdbox_storage *storage)
+int mdbox_storage_rebuild_in_context(struct mdbox_storage *storage,
+ struct mdbox_map_atomic_context *atomic)
{
struct mdbox_storage_rebuild_context *ctx;
struct stat st;
return -1;
}
- ctx = mdbox_storage_rebuild_init(storage);
+ ctx = mdbox_storage_rebuild_init(storage, atomic);
ret = mdbox_storage_rebuild_scan(ctx);
mdbox_storage_rebuild_deinit(ctx);
}
return ret;
}
+
+int mdbox_storage_rebuild(struct mdbox_storage *storage)
+{
+ struct mdbox_map_atomic_context *atomic;
+ int ret;
+
+ atomic = mdbox_map_atomic_begin(storage->map);
+ ret = mdbox_storage_rebuild_in_context(storage, atomic);
+ if (mdbox_map_atomic_finish(&atomic) < 0)
+ ret = -1;
+ return ret;
+}
#ifndef MDBOX_STORAGE_REBUILD_H
#define MDBOX_STORAGE_REBUILD_H
+struct mdbox_map_atomic_context;
+
+int mdbox_storage_rebuild_in_context(struct mdbox_storage *storage,
+ struct mdbox_map_atomic_context *atomic);
int mdbox_storage_rebuild(struct mdbox_storage *storage);
#endif
static int mdbox_mailbox_unref_mails(struct mdbox_mailbox *mbox)
{
+ struct mdbox_map_atomic_context *atomic;
struct mdbox_map_transaction_context *map_trans;
const struct mail_index_header *hdr;
uint32_t seq, map_uid;
int ret = 0;
- map_trans = mdbox_map_transaction_begin(mbox->storage->map, FALSE);
+ atomic = mdbox_map_atomic_begin(mbox->storage->map);
+ map_trans = mdbox_map_transaction_begin(atomic, FALSE);
hdr = mail_index_get_header(mbox->box.view);
for (seq = 1; seq <= hdr->messages_count; seq++) {
if (mdbox_mail_lookup(mbox, mbox->box.view, seq,
if (ret == 0)
ret = mdbox_map_transaction_commit(map_trans);
mdbox_map_transaction_free(&map_trans);
+ if (mdbox_map_atomic_finish(&atomic) < 0)
+ ret = -1;
return ret;
}
}
/* handle syncing records without map being locked. */
- ctx->map_trans =
- mdbox_map_transaction_begin(ctx->mbox->storage->map, FALSE);
- i_array_init(&ctx->expunged_seqs, 64);
+ if (mdbox_map_atomic_is_locked(ctx->atomic)) {
+ ctx->map_trans = mdbox_map_transaction_begin(ctx->atomic, FALSE);
+ i_array_init(&ctx->expunged_seqs, 64);
+ }
while (mail_index_sync_next(ctx->index_sync_ctx, &sync_rec)) {
if ((ret = mdbox_sync_rec(ctx, &sync_rec)) < 0)
break;
}
- /* write refcount changes to map index. map index is locked by
- the commit() and log head is updated, while tail is left behind. */
- if (ret == 0)
- ret = mdbox_map_transaction_commit(ctx->map_trans);
- /* write changes to mailbox index */
- if (ret == 0)
- ret = dbox_sync_mark_expunges(ctx);
-
- /* finish the map changes and unlock the map. this also updates
- map's tail -> head. */
- if (ret < 0)
- mdbox_map_transaction_set_failed(ctx->map_trans);
- mdbox_map_transaction_free(&ctx->map_trans);
+ /* write refcount changes to map index. transaction commit updates the
+ log head, while tail is left behind. */
+ if (mdbox_map_atomic_is_locked(ctx->atomic)) {
+ if (ret == 0)
+ ret = mdbox_map_transaction_commit(ctx->map_trans);
+ /* write changes to mailbox index */
+ if (ret == 0)
+ ret = dbox_sync_mark_expunges(ctx);
+
+ /* finish the map changes and unlock the map. this also updates
+ map's tail -> head. */
+ if (ret < 0)
+ mdbox_map_atomic_set_failed(ctx->atomic);
+ mdbox_map_transaction_free(&ctx->map_trans);
+ array_free(&ctx->expunged_seqs);
+ }
if (box->v.sync_notify != NULL)
box->v.sync_notify(box, 0, 0);
- array_free(&ctx->expunged_seqs);
return ret == 0 ? 1 :
(ctx->mbox->storage->corrupted ? 0 : -1);
}
-static int mdbox_refresh_header(struct mdbox_mailbox *mbox, bool retry)
+static int mdbox_sync_try_begin(struct mdbox_sync_context *ctx,
+ enum mail_index_sync_flags sync_flags)
{
- struct mail_index_view *view;
- struct mdbox_index_header hdr;
+ struct mdbox_mailbox *mbox = ctx->mbox;
int ret;
- view = mail_index_view_open(mbox->box.index);
- ret = mdbox_read_header(mbox, &hdr);
- mail_index_view_close(&view);
-
+ ret = mail_index_sync_begin(mbox->box.index, &ctx->index_sync_ctx,
+ &ctx->sync_view, &ctx->trans, sync_flags);
+ if (ret < 0) {
+ mail_storage_set_index_error(&mbox->box);
+ return -1;
+ }
if (ret == 0) {
- ret = mbox->storage->corrupted ? -1 : 0;
- } else if (retry) {
- (void)mail_index_refresh(mbox->box.index);
- return mdbox_refresh_header(mbox, FALSE);
+ /* nothing to do */
+ return 0;
}
- return ret;
+
+ if (!mdbox_map_atomic_is_locked(ctx->atomic) &&
+ mail_index_sync_has_expunges(ctx->index_sync_ctx)) {
+ /* we have expunges, so we need to write to the map.
+ it must be locked before the mailbox index. */
+ mail_index_sync_rollback(&ctx->index_sync_ctx);
+ if (mdbox_map_atomic_lock(ctx->atomic) < 0)
+ return -1;
+ return mdbox_sync_try_begin(ctx, sync_flags);
+ }
+ return 1;
}
int mdbox_sync_begin(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags,
+ struct mdbox_map_atomic_context *atomic,
struct mdbox_sync_context **ctx_r)
{
struct mail_storage *storage = mbox->box.storage;
struct mdbox_sync_context *ctx;
enum mail_index_sync_flags sync_flags;
- unsigned int i;
int ret;
bool rebuild, storage_rebuilt = FALSE;
+ *ctx_r = NULL;
+
/* avoid race conditions with mailbox creation, don't check for dbox
headers until syncing has locked the mailbox */
rebuild = mbox->storage->corrupted ||
(flags & MDBOX_SYNC_FLAG_FORCE_REBUILD) != 0;
if (rebuild) {
- if (mdbox_storage_rebuild(mbox->storage) < 0)
+ if (mdbox_storage_rebuild_in_context(mbox->storage, atomic) < 0)
return -1;
index_mailbox_reset_uidvalidity(&mbox->box);
storage_rebuilt = TRUE;
ctx = i_new(struct mdbox_sync_context, 1);
ctx->mbox = mbox;
ctx->flags = flags;
+ ctx->atomic = atomic;
sync_flags = index_storage_get_sync_flags(&mbox->box);
if (!rebuild && (flags & MDBOX_SYNC_FLAG_FORCE) == 0)
/* don't write unnecessary dirty flag updates */
sync_flags |= MAIL_INDEX_SYNC_FLAG_AVOID_FLAG_UPDATES;
- for (i = 0;; i++) {
- ret = mail_index_sync_begin(mbox->box.index,
- &ctx->index_sync_ctx,
- &ctx->sync_view, &ctx->trans,
- sync_flags);
- if (ret <= 0) {
- if (ret < 0)
- mail_storage_set_index_error(&mbox->box);
- i_free(ctx);
- *ctx_r = NULL;
- return ret;
- }
+ ret = mdbox_sync_try_begin(ctx, sync_flags);
+ if (ret <= 0) {
+ /* failed / nothing to do */
+ i_free(ctx);
+ return ret;
+ }
- /* now that we're locked, check again if we want to rebuild */
- if (mdbox_refresh_header(mbox, FALSE) < 0)
- ret = 0;
- else {
- if ((ret = mdbox_sync_index(ctx)) > 0)
- break;
- }
+ if ((ret = mdbox_sync_index(ctx)) <= 0) {
+ mail_index_sync_rollback(&ctx->index_sync_ctx);
+ i_free_and_null(ctx);
- /* failure. keep the index locked while we're doing a
- rebuild. */
- if (ret == 0) {
- if (!storage_rebuilt) {
- /* we'll need to rebuild storage too.
- try again from the beginning. */
- mdbox_storage_set_corrupted(mbox->storage);
- mail_index_sync_rollback(&ctx->index_sync_ctx);
- i_free(ctx);
- return mdbox_sync_begin(mbox, flags, ctx_r);
- }
+ if (ret < 0)
+ return -1;
+
+ /* corrupted */
+ if (storage_rebuilt) {
mail_storage_set_critical(storage,
"mdbox %s: Storage keeps breaking",
- ctx->mbox->box.path);
- ret = -1;
- }
- mail_index_sync_rollback(&ctx->index_sync_ctx);
- if (ret < 0) {
- i_free(ctx);
+ mbox->box.path);
return -1;
}
+
+ /* we'll need to rebuild storage.
+ try again from the beginning. */
+ mdbox_storage_set_corrupted(mbox->storage);
+ return mdbox_sync_begin(mbox, flags, atomic, ctx_r);
}
*ctx_r = ctx;
int mdbox_sync(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags)
{
struct mdbox_sync_context *sync_ctx;
+ struct mdbox_map_atomic_context *atomic;
+ int ret;
- if (mdbox_sync_begin(mbox, flags, &sync_ctx) < 0)
- return -1;
-
- if (sync_ctx == NULL)
- return 0;
- return mdbox_sync_finish(&sync_ctx, TRUE);
+ atomic = mdbox_map_atomic_begin(mbox->storage->map);
+ ret = mdbox_sync_begin(mbox, flags, atomic, &sync_ctx);
+ if (ret == 0 && sync_ctx != NULL)
+ ret = mdbox_sync_finish(&sync_ctx, TRUE);
+ if (mdbox_map_atomic_finish(&atomic) < 0)
+ ret = -1;
+ return ret;
}
struct mailbox_sync_context *
struct mail_index_view *sync_view;
struct mail_index_transaction *trans;
struct mdbox_map_transaction_context *map_trans;
+ struct mdbox_map_atomic_context *atomic;
enum mdbox_sync_flags flags;
ARRAY_TYPE(seq_range) expunged_seqs;
};
int mdbox_sync_begin(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags,
+ struct mdbox_map_atomic_context *atomic,
struct mdbox_sync_context **ctx_r);
int mdbox_sync_finish(struct mdbox_sync_context **ctx, bool success);
int mdbox_sync(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags);