if (mail_transaction_log_sync_lock(index->log, &file_seq,
&file_offset) < 0)
return -1;
- if (mail_index_lock_exclusive(index, 0, 0, &lock_id) < 0) {
+ if (mail_index_lock_exclusive(index, &lock_id) < 0) {
mail_transaction_log_sync_unlock(index->log);
return -1;
}
locks then, though */
if (lock_type == F_WRLCK)
return 0;
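+ /* the index isn't locked yet; check whether the file has changed
+    on disk before using it */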
+ if (update_index && index->lock_type == F_UNLCK) {
+ if (mail_index_has_changed(index) < 0)
+ return -1;
+ }
if (mail_index_lock_mprotect(index, lock_type) < 0)
return -1;
+ index->lock_type = lock_type;
return 1;
}
return fd;
}
-static int mail_index_need_lock(struct mail_index *index,
- uint32_t log_file_seq, uoff_t log_file_offset)
-{
- if (mail_index_map(index, FALSE) <= 0)
- return 1;
-
- if (log_file_seq != 0 &&
- (index->hdr->log_file_seq > log_file_seq ||
- (index->hdr->log_file_seq == log_file_seq &&
- index->hdr->log_file_offset >= log_file_offset))) {
- /* already synced */
- return 0;
- }
-
- return 1;
-}
-
static int mail_index_lock_exclusive_copy(struct mail_index *index)
{
int fd;
if (index->copy_lock_path != NULL) {
index->excl_lock_count++;
- return 1;
+ return 0;
}
 /* copy the index to index.tmp and use it. when the exclusive lock is released, the copy is moved over the real index file */
}
i_assert(index->excl_lock_count == 1);
- return 1;
+ return 0;
}
int mail_index_lock_exclusive(struct mail_index *index,
- uint32_t log_file_seq, uoff_t log_file_offset,
unsigned int *lock_id_r)
{
- unsigned int lock_id;
int ret;
/* exclusive transaction log lock protects exclusive locking
/* wait two seconds for exclusive lock */
ret = mail_index_lock(index, F_WRLCK, 2, TRUE, lock_id_r);
- if (ret > 0) {
- if (mail_index_need_lock(index, log_file_seq, log_file_offset))
- return 1;
-
- mail_index_unlock(index, *lock_id_r);
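+ /* got the lock; the "already synced" check is now done by the
+    sync code instead */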
+ if (ret > 0)
return 0;
- }
if (ret < 0)
return -1;
- /* Grab shared lock to make sure it's not already being
- exclusively locked */
- if (mail_index_lock_shared(index, TRUE, &lock_id) < 0)
- return -1;
-
- if (log_file_seq != 0) {
- /* check first if we really need to recreate it */
- ret = mail_index_need_lock(index, log_file_seq,
- log_file_offset);
- if (ret == 0) {
- mail_index_unlock(index, lock_id);
- return 0;
- }
- }
-
- mail_index_unlock(index, lock_id);
-
*lock_id_r = index->lock_id + 1;
return mail_index_lock_exclusive_copy(index);
}
races, unless transaction log is exclusively locked). */
int mail_index_lock_shared(struct mail_index *index, int update_index,
unsigned int *lock_id_r);
-/* Returns 1 = ok, 0 = already synced up to given log_file_offset, -1 = error */
+/* Returns 0 = ok, -1 = error. */
int mail_index_lock_exclusive(struct mail_index *index,
- uint32_t log_file_seq, uoff_t log_file_offset,
unsigned int *lock_id_r);
void mail_index_unlock(struct mail_index *index, unsigned int lock_id);
/* Returns 1 if given lock_id is valid, 0 if not. */
unsigned int append_count;
uint32_t count, file_seq, src_idx, dest_idx;
uoff_t file_offset;
+ unsigned int lock_id;
int ret;
/* rewind */
return 0;
}
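+ /* the index must be locked exclusively while the updated header
+    is written back to it */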
+ if (mail_index_lock_exclusive(index, &lock_id) < 0)
+ return -1;
+
if (MAIL_INDEX_MAP_IS_IN_MEMORY(map))
map->write_to_disk = TRUE;
if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
memcpy(map->mmap_base, &ctx.hdr, sizeof(ctx.hdr));
- if (msync(map->mmap_base, map->file_used_size, MS_SYNC) < 0)
- return mail_index_set_syscall_error(index, "msync()");
+ if (msync(map->mmap_base, map->file_used_size, MS_SYNC) < 0) {
+ mail_index_set_syscall_error(index, "msync()");
+ ret = -1;
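+ /* don't return yet; the index still has to be unlocked below */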
+ }
} else {
map->hdr_copy = ctx.hdr;
map->hdr = &map->hdr_copy;
}
+ mail_index_unlock(index, lock_id);
return ret;
}
return ret;
}
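+/* Returns 1 if the index still needs to be synced up to the given
+   transaction log position, 0 if it's already synced. */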
+static int mail_index_need_lock(struct mail_index *index,
+ uint32_t log_file_seq, uoff_t log_file_offset)
+{
+ if (index->hdr->log_file_seq > log_file_seq ||
+ (index->hdr->log_file_seq == log_file_seq &&
+ index->hdr->log_file_offset >= log_file_offset)) {
+ /* already synced */
+ return 0;
+ }
+
+ return 1;
+}
+
int mail_index_sync_begin(struct mail_index *index,
struct mail_index_sync_ctx **ctx_r,
struct mail_index_view **view_r,
uoff_t offset;
size_t size;
unsigned int lock_id;
- int ret;
if (mail_transaction_log_sync_lock(index->log, &seq, &offset) < 0)
return -1;
- /* FIXME: really needed yet? If there are readers, the index file
- is copied even if there are no changes.. */
- ret = mail_index_lock_exclusive(index, log_file_seq,
- log_file_offset, &lock_id);
- if (ret <= 0) {
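+ /* a shared lock is enough for deciding whether anything needs
+    syncing; the exclusive lock is taken later, when the index is
+    actually rewritten */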
+ if (mail_index_lock_shared(index, TRUE, &lock_id) < 0) {
mail_transaction_log_sync_unlock(index->log);
- return ret;
+ return -1;
}
if (mail_index_map(index, FALSE) <= 0) {
mail_transaction_log_sync_unlock(index->log);
+ mail_index_unlock(index, lock_id);
return -1;
}
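+ /* now that the map is refreshed, see if we're already synced up
+    to the wanted log position */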
+ if (!mail_index_need_lock(index, log_file_seq, log_file_offset)) {
+ mail_index_unlock(index, lock_id);
+ mail_transaction_log_sync_unlock(index->log);
+ return 0;
+ }
+
ctx = i_new(struct mail_index_sync_ctx, 1);
ctx->index = index;
ctx->lock_id = lock_id;
view->inconsistent = TRUE;
return -1;
}
+ } else if (update_index) {
+ /* FIXME: check if we need to reopen it! */
}
return 0;
view->expunges_buf);
ret = mail_transaction_map(hdr, data, &seqfix_funcs, view);
mail_transaction_expunge_traverse_deinit(view->exp_ctx);
- i_assert(buffer_get_used_size(view->data_buf) == hdr->size);
- *data_r = buffer_get_data(view->data_buf, NULL);
+ if (ret > 0) {
+ /* modified */
+ i_assert(buffer_get_used_size(view->data_buf) ==
+ hdr->size);
+ *data_r = buffer_get_data(view->data_buf, NULL);
+ } else {
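+ /* nothing was rewritten, so data_buf stayed empty */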
+ i_assert(buffer_get_used_size(view->data_buf) == 0);
+ }
}
if ((hdr->type & MAIL_TRANSACTION_EXPUNGE) != 0) {
if (index->fd != -1) {
index->log_locked = TRUE; /* kludging around assert.. */
- if (mail_index_lock_exclusive(index, 0, 0, &lock_id) < 0) {
+ if (mail_index_lock_exclusive(index, &lock_id) < 0) {
(void)file_dotlock_delete(path, fd);
index->log_locked = FALSE;
return -1;
}
break;
}
- case MAIL_TRANSACTION_EXPUNGE: {
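+ /* expunge records have MAIL_TRANSACTION_EXPUNGE_PROT ORed into
+    their type as a guard against corrupted logs, so match both */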
+ case MAIL_TRANSACTION_EXPUNGE:
+ case MAIL_TRANSACTION_EXPUNGE|MAIL_TRANSACTION_EXPUNGE_PROT: {
const struct mail_transaction_expunge *rec, *end;
if (map->expunge == NULL)