dovecot: Link cache records together directly when writing the new records
dovecot at dovecot.org
dovecot at dovecot.org
Thu Jan 10 04:23:27 EET 2008
details: http://hg.dovecot.org/dovecot/rev/876c7bca351c
changeset: 7138:876c7bca351c
user: Timo Sirainen <tss at iki.fi>
date: Thu Jan 10 04:23:23 2008 +0200
description:
Link cache records together directly when writing the new records, instead
of delaying them until later and causing lots of small writes. We still do
this delayed check and do the writes when it's required, but it shouldn't
happen normally.
diffstat:
3 files changed, 59 insertions(+), 14 deletions(-)
src/lib-index/mail-cache-lookup.c | 39 ++++++++++++++++++++++----------
src/lib-index/mail-cache-private.h | 2 +
src/lib-index/mail-cache-transaction.c | 32 ++++++++++++++++++++++++--
diffs (140 lines):
diff -r c33c87781ab4 -r 876c7bca351c src/lib-index/mail-cache-lookup.c
--- a/src/lib-index/mail-cache-lookup.c Mon Jan 07 12:49:40 2008 +0200
+++ b/src/lib-index/mail-cache-lookup.c Thu Jan 10 04:23:23 2008 +0200
@@ -48,24 +48,39 @@ int mail_cache_get_record(struct mail_ca
return 0;
}
+uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
+ uint32_t seq, uint32_t *reset_id_r)
+{
+ struct mail_cache *cache = mail_index_view_get_index(view)->cache;
+ struct mail_index_map *map;
+ const void *data;
+ uint32_t offset;
+
+ mail_index_lookup_ext_full(view, seq, cache->ext_id,
+ &map, &data, NULL);
+ if (data == NULL) {
+ /* no cache offsets */
+ return 0;
+ }
+ offset = *((const uint32_t *)data);
+ if (offset == 0)
+ return 0;
+
+ if (!mail_index_ext_get_reset_id(view, map, cache->ext_id, reset_id_r))
+ i_unreached();
+ return offset;
+}
+
static int
mail_cache_lookup_offset(struct mail_cache *cache, struct mail_index_view *view,
uint32_t seq, uint32_t *offset_r)
{
- struct mail_index_map *map;
- const void *data;
- uint32_t reset_id;
+ uint32_t offset, reset_id;
int i, ret;
- mail_index_lookup_ext_full(view, seq, cache->ext_id,
- &map, &data, NULL);
- if (data == NULL || *((const uint32_t *)data) == 0) {
- /* nothing in cache (for this record) */
+ offset = mail_cache_lookup_cur_offset(view, seq, &reset_id);
+ if (offset == 0)
return 0;
- }
-
- if (!mail_index_ext_get_reset_id(view, map, cache->ext_id, &reset_id))
- i_unreached();
/* reset_id must match file_seq or the offset is for a different cache
file. if this happens, try if reopening the cache helps. if not,
@@ -86,7 +101,7 @@ mail_cache_lookup_offset(struct mail_cac
}
}
- *offset_r = *((const uint32_t *)data);
+ *offset_r = offset;
return 1;
}
diff -r c33c87781ab4 -r 876c7bca351c src/lib-index/mail-cache-private.h
--- a/src/lib-index/mail-cache-private.h Mon Jan 07 12:49:40 2008 +0200
+++ b/src/lib-index/mail-cache-private.h Thu Jan 10 04:23:23 2008 +0200
@@ -230,6 +230,8 @@ int mail_cache_header_fields_get_next_of
int mail_cache_header_fields_get_next_offset(struct mail_cache *cache,
uint32_t *offset_r);
+uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
+ uint32_t seq, uint32_t *reset_id_r);
int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
const struct mail_cache_record **rec_r);
uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view);
diff -r c33c87781ab4 -r 876c7bca351c src/lib-index/mail-cache-transaction.c
--- a/src/lib-index/mail-cache-transaction.c Mon Jan 07 12:49:40 2008 +0200
+++ b/src/lib-index/mail-cache-transaction.c Thu Jan 10 04:23:23 2008 +0200
@@ -673,6 +673,7 @@ mail_cache_transaction_switch_seq(struct
struct mail_cache_record *rec, new_rec;
void *data;
size_t size;
+ uint32_t reset_id;
if (ctx->prev_seq != 0) {
/* fix record size */
@@ -680,6 +681,15 @@ mail_cache_transaction_switch_seq(struct
rec = PTR_OFFSET(data, ctx->prev_pos);
rec->size = size - ctx->prev_pos;
i_assert(rec->size > sizeof(*rec));
+
+ /* set prev_offset if possible */
+ rec->prev_offset =
+ mail_cache_lookup_cur_offset(ctx->view->view,
+ ctx->prev_seq, &reset_id);
+ if (reset_id != ctx->cache->hdr->file_seq)
+ rec->prev_offset = 0;
+ else
+ ctx->cache->hdr_copy.continued_record_count++;
array_append(&ctx->cache_data_seq, &ctx->prev_seq, 1);
ctx->prev_pos = size;
@@ -1025,17 +1035,35 @@ int mail_cache_link(struct mail_cache *c
int mail_cache_link(struct mail_cache *cache, uint32_t old_offset,
uint32_t new_offset)
{
+ const struct mail_cache_record *rec;
+
i_assert(cache->locked);
if (MAIL_CACHE_IS_UNUSABLE(cache))
return -1;
- if (new_offset + sizeof(struct mail_cache_record) >
- cache->hdr_copy.used_file_size) {
+ /* this function is called for each added cache record (or cache
+ extension record update actually) with new_offset pointing to the
+ new record and old_offset pointing to the previous record.
+
+ we want to keep the old and new records linked so both old and new
+ cached data is found. normally they are already linked correctly.
+ the problem only comes when multiple processes are adding cache
+ records at the same time. we'd rather not lose those additions, so
+ force the linking order to be new_offset -> old_offset if it isn't
+ already. */
+ if (mail_cache_map(cache, new_offset, sizeof(*rec)) < 0)
+ return -1;
+ if (new_offset + sizeof(*rec) > cache->mmap_length) {
mail_cache_set_corrupted(cache,
"Cache record offset %u points outside file",
new_offset);
return -1;
+ }
+ rec = CACHE_RECORD(cache, new_offset);
+ if (rec->prev_offset == old_offset) {
+ /* link is already correct */
+ return 0;
}
if (mail_cache_link_unlocked(cache, old_offset, new_offset) < 0)
More information about the dovecot-cvs
mailing list