if (cache_rec->size > CACHE_PREFETCH) {
if (mail_cache_map(cache, offset, cache_rec->size) < 0)
return NULL;
+ cache_rec = CACHE_RECORD(cache, offset);
}
if (offset + cache_rec->size > cache->mmap_length) {
return 0;
}
+static int
+mail_cache_foreach_rec(struct mail_cache_view *view,
+ const struct mail_cache_record *cache_rec,
+ mail_cache_foreach_callback_t *callback, void *context)
+{
+ size_t pos, next_pos, max_size, data_size;
+ uint32_t field;
+ int ret;
+
+ max_size = cache_rec->size;
+ if (max_size < sizeof(*cache_rec) + sizeof(uint32_t)*2) {
+ mail_cache_set_corrupted(view->cache,
+ "record has invalid size");
+ return -1;
+ }
+ max_size -= sizeof(uint32_t);
+
+ for (pos = sizeof(*cache_rec); pos < max_size; ) {
+ field = *((const uint32_t *)CONST_PTR_OFFSET(cache_rec, pos));
+ pos += sizeof(uint32_t);
+
+ data_size = mail_cache_field_sizes[field];
+ if (data_size == (unsigned int)-1) {
+ data_size = *((const uint32_t *)
+ CONST_PTR_OFFSET(cache_rec, pos));
+ pos += sizeof(uint32_t);
+ }
+
+ next_pos = pos + ((data_size + 3) & ~3);
+ if (next_pos > cache_rec->size) {
+ mail_cache_set_corrupted(view->cache,
+ "Record continues outside its allocated size");
+ return -1;
+ }
+
+ ret = callback(view, field, CONST_PTR_OFFSET(cache_rec, pos),
+ data_size, context);
+ if (ret <= 0)
+ return ret;
+
+ pos = next_pos;
+ }
+ return 1;
+}
+
int mail_cache_foreach(struct mail_cache_view *view, uint32_t seq,
- int (*callback)(struct mail_cache_view *view,
- enum mail_cache_field field,
- const void *data, size_t data_size,
- void *context), void *context)
+ mail_cache_foreach_callback_t *callback, void *context)
{
const struct mail_cache_record *cache_rec;
- size_t pos, next_pos, max_size, data_size;
- uint32_t offset, field;
+ uint32_t offset;
int ret;
if (MAIL_CACHE_IS_UNUSABLE(view->cache))
cache_rec = mail_cache_get_record(view->cache, offset);
while (cache_rec != NULL) {
- max_size = cache_rec->size;
- if (max_size < sizeof(*cache_rec) + sizeof(uint32_t)*2) {
- mail_cache_set_corrupted(view->cache,
- "record has invalid size");
- return -1;
- }
- max_size -= sizeof(uint32_t);
-
- for (pos = sizeof(*cache_rec); pos < max_size; ) {
- field = *((const uint32_t *)
- CONST_PTR_OFFSET(cache_rec, pos));
- pos += sizeof(uint32_t);
-
- data_size = mail_cache_field_sizes[field];
- if (data_size == (unsigned int)-1) {
- data_size = *((const uint32_t *)
- CONST_PTR_OFFSET(cache_rec, pos));
- pos += sizeof(uint32_t);
- }
-
- next_pos = pos + ((data_size + 3) & ~3);
- if (next_pos > cache_rec->size) {
- mail_cache_set_corrupted(view->cache,
- "Record continues outside it's "
- "allocated size");
- return -1;
- }
-
- ret = callback(view, field,
- CONST_PTR_OFFSET(cache_rec, pos),
- data_size, context);
- if (ret <= 0)
- return ret;
-
- pos = next_pos;
- }
+ ret = mail_cache_foreach_rec(view, cache_rec,
+ callback, context);
+ if (ret <= 0)
+ return ret;
cache_rec = mail_cache_get_record(view->cache,
cache_rec->prev_offset);
}
- if (view->transaction != NULL) {
- // FIXME: update
+ if (view->trans_seq1 <= seq && view->trans_seq2 >= seq &&
+ mail_cache_transaction_lookup(view->transaction, seq, &offset)) {
+ cache_rec = mail_cache_get_record(view->cache, offset);
+ if (cache_rec != NULL) {
+ return mail_cache_foreach_rec(view, cache_rec,
+ callback, context);
+ }
}
return 1;
}
struct mail_index_view *view;
struct mail_cache_transaction_ctx *transaction;
+ uint32_t trans_seq1, trans_seq2;
+
char cached_exists[32];
uint32_t cached_exists_seq;
};
+typedef int mail_cache_foreach_callback_t(struct mail_cache_view *view,
+ enum mail_cache_field field,
+ const void *data, size_t data_size,
+ void *context);
+
extern unsigned int mail_cache_field_sizes[32];
extern enum mail_cache_field mail_cache_header_fields[MAIL_CACHE_HEADERS_COUNT];
mail_cache_get_record(struct mail_cache *cache, uint32_t offset);
int mail_cache_foreach(struct mail_cache_view *view, uint32_t seq,
- int (*callback)(struct mail_cache_view *view,
- enum mail_cache_field field,
- const void *data, size_t data_size,
- void *context), void *context);
+ mail_cache_foreach_callback_t *callback, void *context);
int mail_cache_transaction_commit(struct mail_cache_transaction_ctx *ctx);
void mail_cache_transaction_rollback(struct mail_cache_transaction_ctx *ctx);
+int mail_cache_transaction_lookup(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, uint32_t *offset_r);
+
int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size);
void mail_cache_file_close(struct mail_cache *cache);
int mail_cache_reopen(struct mail_cache *cache);
uint32_t reserved_space_offset, reserved_space;
uint32_t last_grow_size;
- uint32_t first_seq, last_seq;
- enum mail_cache_field fields[32];
-
unsigned int changes:1;
};
static void mail_cache_transaction_free(struct mail_cache_transaction_ctx *ctx)
{
ctx->view->transaction = NULL;
+ ctx->view->trans_seq1 = ctx->view->trans_seq2 = 0;
buffer_free(ctx->cache_data);
buffer_free(ctx->cache_data_seq);
size_t size;
unsigned int i;
- mail_cache_transaction_free_space(ctx);
-
- buf = buffer_get_data(ctx->reservations, &size);
- i_assert(size % sizeof(uint32_t)*2 == 0);
- size /= sizeof(*buf);
+ if (mail_cache_lock(cache) > 0) {
+ mail_cache_transaction_free_space(ctx);
- if (size > 0) {
- /* free flushed data as well. do it from end to beginning so
- we have a better chance of updating used_file_size instead
- of adding holes */
- do {
- size -= 2;
- mail_cache_free_space(ctx->cache, buf[size],
- buf[size+1]);
- } while (size > 0);
+ buf = buffer_get_data(ctx->reservations, &size);
+ i_assert(size % (sizeof(uint32_t)*2) == 0);
+ size /= sizeof(*buf);
+
+ if (size > 0) {
+ /* free flushed data as well. do it from end to
+ beginning so we have a better chance of updating
+ used_file_size instead of adding holes */
+ do {
+ size -= 2;
+ mail_cache_free_space(ctx->cache, buf[size],
+ buf[size+1]);
+ } while (size > 0);
+ }
+ mail_cache_unlock(cache);
}
/* make sure we don't cache the headers */
/* remember roughly what we have modified, so cache lookups can
look into transactions to see changes. */
- if (seq < ctx->first_seq || ctx->first_seq == 0)
- ctx->first_seq = seq;
- if (seq > ctx->last_seq)
- ctx->last_seq = seq;
- ctx->view->cached_exists[field] = TRUE;
- ctx->fields[field] = TRUE;
+ if (seq < ctx->view->trans_seq1 || ctx->view->trans_seq1 == 0)
+ ctx->view->trans_seq1 = seq;
+ if (seq > ctx->view->trans_seq2)
+ ctx->view->trans_seq2 = seq;
}
full_size = (data_size + 3) & ~3;
return -1;
}
+int mail_cache_transaction_lookup(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, uint32_t *offset_r)
+{
+ return mail_index_update_cache_lookup(ctx->trans, seq, offset_r);
+}
+
+
int mail_cache_link(struct mail_cache *cache, uint32_t old_offset,
uint32_t new_offset)
{
void mail_index_update_cache(struct mail_index_transaction *t, uint32_t seq,
uint32_t file_seq, uint32_t offset,
uint32_t *old_offset_r);
+int mail_index_update_cache_lookup(struct mail_index_transaction *t,
+ uint32_t seq, uint32_t *offset_r);
int mail_index_fix_header(struct mail_index *index, struct mail_index_map *map,
struct mail_index_header *hdr, const char **error_r);
&update, sizeof(update));
}
-static int mail_index_update_seq_buffer(buffer_t **buffer, uint32_t seq,
- const void *record, size_t record_size,
- void *old_record)
+static int
+mail_index_seq_buffer_lookup(buffer_t *buffer, uint32_t seq,
+ size_t record_size, size_t *pos_r)
{
unsigned int idx, left_idx, right_idx;
void *data;
uint32_t full_record_size, *seq_p;
size_t size;
- full_record_size = record_size + sizeof(uint32_t);
+ full_record_size = record_size + sizeof(seq);
- if (*buffer == NULL)
- *buffer = buffer_create_dynamic(default_pool, 1024, (size_t)-1);
- data = buffer_get_modifyable_data(*buffer, &size);
+ data = buffer_get_modifyable_data(buffer, &size);
/* we're probably appending it, check */
if (size == 0)
else if (*seq_p > seq)
right_idx = idx;
else {
- /* already there, update */
- if (old_record != NULL) {
- memcpy(old_record, seq_p+1,
- record_size);
- }
- memcpy(seq_p+1, record, record_size);
+ *pos_r = idx * full_record_size;
return TRUE;
}
}
}
- idx *= full_record_size;
- if (idx != size) {
- buffer_copy(*buffer, idx + full_record_size,
- *buffer, idx, (size_t)-1);
- }
- seq_p = buffer_get_space_unsafe(*buffer, idx, full_record_size);
-
- *seq_p = seq;
- memcpy(seq_p+1, record, record_size);
+ *pos_r = idx * full_record_size;
return FALSE;
}
+static int mail_index_update_seq_buffer(buffer_t **buffer, uint32_t seq,
+ const void *record, size_t record_size,
+ void *old_record)
+{
+ void *p;
+ size_t pos;
+
+ if (*buffer == NULL) {
+ *buffer = buffer_create_dynamic(default_pool, 1024, (size_t)-1);
+ buffer_append(*buffer, &seq, sizeof(seq));
+ buffer_append(*buffer, record, record_size);
+ return FALSE;
+ }
+
+ if (mail_index_seq_buffer_lookup(*buffer, seq, record_size, &pos)) {
+ /* already there, update */
+ p = buffer_get_space_unsafe(*buffer, pos + sizeof(seq),
+ record_size);
+ if (old_record != NULL)
+ memcpy(old_record, p, record_size);
+ memcpy(p, record, record_size);
+ return TRUE;
+ } else {
+ /* insert */
+ buffer_copy(*buffer, pos + sizeof(seq) + record_size,
+ *buffer, pos, (size_t)-1);
+ buffer_write(*buffer, pos, &seq, sizeof(seq));
+ buffer_write(*buffer, pos + sizeof(seq), record, record_size);
+ return FALSE;
+ }
+}
+
static void
mail_index_transaction_reset_cache_updates(struct mail_index_transaction *t)
{
}
}
+int mail_index_update_cache_lookup(struct mail_index_transaction *t,
+ uint32_t seq, uint32_t *offset_r)
+{
+ const void *p;
+ size_t pos;
+
+ if (t->cache_updates == NULL)
+ return FALSE;
+
+ if (!mail_index_seq_buffer_lookup(t->cache_updates, seq,
+ sizeof(*offset_r), &pos))
+ return FALSE;
+
+ p = buffer_get_data(t->cache_updates, NULL);
+ memcpy(offset_r, CONST_PTR_OFFSET(p, pos + sizeof(*offset_r)),
+ sizeof(*offset_r));
+ return TRUE;
+}
+
void mail_index_update_extra_rec(struct mail_index_transaction *t,
uint32_t seq, uint32_t data_id,
const void *data)