reftable/block: reuse uncompressed blocks
author     Patrick Steinhardt <ps@pks.im>
           Mon, 8 Apr 2024 12:16:54 +0000 (14:16 +0200)
committer  Junio C Hamano <gitster@pobox.com>
           Mon, 15 Apr 2024 17:36:09 +0000 (10:36 -0700)
The reftable backend stores reflog entries in a compressed format and
thus needs to uncompress blocks before one can read records from them.
For each reflog block we therefore have to allocate an array into which
we can decompress the block contents. This array is discarded whenever
the table iterator moves to the next block, so we end up reallocating a
fresh array for every block, which is quite wasteful.
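
The wasteful pattern boils down to one malloc()/free() cycle per block
read. The following is a minimal, self-contained C sketch of that
pattern; the names (`demo_block`, `demo_inflate`, `read_block_records`)
are invented for illustration and do not appear in the reftable code,
and the decompression step is stubbed out with a plain copy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_block {
	const unsigned char *data; /* compressed payload (stand-in) */
	size_t len;                /* compressed size */
	size_t uncompressed_len;   /* size recorded in the block header */
};

/* Hypothetical stand-in for uncompress2(); just copies in this demo. */
static int demo_inflate(unsigned char *dst, const struct demo_block *block)
{
	memcpy(dst, block->data, block->len);
	return 0;
}

static int read_block_records(const struct demo_block *block)
{
	/* One malloc()/free() pair per block: O(#blocks) allocations. */
	unsigned char *uncompressed = malloc(block->uncompressed_len);
	if (!uncompressed)
		return -1;

	if (demo_inflate(uncompressed, block) < 0) {
		free(uncompressed);
		return -1;
	}

	/* ... records would be parsed out of `uncompressed` here ... */

	free(uncompressed);
	return 0;
}

int main(void)
{
	const unsigned char payload[] = "demo payload";
	struct demo_block block = { payload, sizeof(payload), sizeof(payload) };

	/* Iterating over five blocks performs five scratch allocations. */
	for (int i = 0; i < 5; i++)
		read_block_records(&block);

	printf("done\n");
	return 0;
}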

Refactor the code to reuse the uncompressed block data when moving the
block reader to a new block. This significantly reduces the number of
allocations when iterating through many compressed blocks. The following
measurements were taken with `git reflog list` when listing 100k reflogs;
a simplified sketch of the reuse pattern follows the measurements.
Before:

  HEAP SUMMARY:
      in use at exit: 13,473 bytes in 122 blocks
    total heap usage: 45,755 allocs, 45,633 frees, 254,779,456 bytes allocated

After:

  HEAP SUMMARY:
      in use at exit: 13,473 bytes in 122 blocks
    total heap usage: 23,028 allocs, 22,906 frees, 162,813,547 bytes allocated
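
The shape of the fix is to hoist the scratch buffer into the block
reader and grow it only when a block needs more room than the buffer
currently has. The following self-contained C sketch illustrates that
reuse pattern; `demo_reader`, `demo_ensure_capacity` and
`demo_read_block` are invented for illustration, and the growth helper
only loosely mirrors what `REFTABLE_ALLOC_GROW` does in the patch below:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_reader {
	unsigned char *uncompressed_data; /* reused across blocks */
	size_t uncompressed_cap;          /* current capacity */
};

/* Grow-only reallocation; capacity never shrinks between blocks. */
static int demo_ensure_capacity(struct demo_reader *r, size_t needed)
{
	unsigned char *p;
	size_t cap;

	if (needed <= r->uncompressed_cap)
		return 0;

	cap = r->uncompressed_cap ? 2 * r->uncompressed_cap : 64;
	if (cap < needed)
		cap = needed;

	p = realloc(r->uncompressed_data, cap);
	if (!p)
		return -1;

	r->uncompressed_data = p;
	r->uncompressed_cap = cap;
	return 0;
}

static int demo_read_block(struct demo_reader *r, const unsigned char *data,
			   size_t uncompressed_len)
{
	if (demo_ensure_capacity(r, uncompressed_len) < 0)
		return -1;

	/* Stand-in for uncompress2() into the reused buffer. */
	memcpy(r->uncompressed_data, data, uncompressed_len);

	/* ... records would be parsed here ... */
	return 0;
}

int main(void)
{
	struct demo_reader r = { NULL, 0 };
	const unsigned char payload[] = "demo payload";

	/* Five blocks, but only the first iteration allocates. */
	for (int i = 0; i < 5; i++)
		demo_read_block(&r, payload, sizeof(payload));

	printf("capacity after scan: %zu\n", r.uncompressed_cap);

	/* Released once, when the reader itself is torn down. */
	free(r.uncompressed_data);
	return 0;
}

The buffer's lifetime is thus tied to the reader instead of to a single
block, which is why the actual patch frees it once in
block_reader_release() rather than at the end of block_reader_init().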

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
reftable/block.c
reftable/block.h
reftable/reader.c

index 0c4e71eae3945dbb00f9c2595b3bb1d3f0229fce..9460273290f28348ff1477108a50d8ed924dd24c 100644 (file)
@@ -186,7 +186,6 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
        uint16_t restart_count = 0;
        uint32_t restart_start = 0;
        uint8_t *restart_bytes = NULL;
-       uint8_t *uncompressed = NULL;
 
        reftable_block_done(&br->block);
 
@@ -202,14 +201,15 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
                uLongf src_len = block->len - block_header_skip;
 
                /* Log blocks specify the *uncompressed* size in their header. */
-               REFTABLE_ALLOC_ARRAY(uncompressed, sz);
+               REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
+                                   br->uncompressed_cap);
 
                /* Copy over the block header verbatim. It's not compressed. */
-               memcpy(uncompressed, block->data, block_header_skip);
+               memcpy(br->uncompressed_data, block->data, block_header_skip);
 
                /* Uncompress */
                if (Z_OK !=
-                   uncompress2(uncompressed + block_header_skip, &dst_len,
+                   uncompress2(br->uncompressed_data + block_header_skip, &dst_len,
                                block->data + block_header_skip, &src_len)) {
                        err = REFTABLE_ZLIB_ERROR;
                        goto done;
@@ -222,10 +222,8 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 
                /* We're done with the input data. */
                reftable_block_done(block);
-               block->data = uncompressed;
-               uncompressed = NULL;
+               block->data = br->uncompressed_data;
                block->len = sz;
-               block->source = malloc_block_source();
                full_block_size = src_len + block_header_skip;
        } else if (full_block_size == 0) {
                full_block_size = sz;
@@ -254,12 +252,12 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
        br->restart_bytes = restart_bytes;
 
 done:
-       reftable_free(uncompressed);
        return err;
 }
 
 void block_reader_release(struct block_reader *br)
 {
+       reftable_free(br->uncompressed_data);
        reftable_block_done(&br->block);
 }
 
index d733d45ee0b4116e388d01017250bc153f469d14..12414eb64271230581e4ff446ae3cdc6fdf5b72c 100644 (file)
@@ -66,6 +66,10 @@ struct block_reader {
        struct reftable_block block;
        int hash_size;
 
+       /* Uncompressed data for log entries. */
+       unsigned char *uncompressed_data;
+       size_t uncompressed_cap;
+
        /* size of the data, excluding restart data. */
        uint32_t block_len;
        uint8_t *restart_bytes;
index dd4de294a12c9d18f58a9f9a6759b3f895e079ca..aacd5f1337ad53c73b7f6ba66b7be7fa1a4b65fb 100644 (file)
@@ -459,6 +459,8 @@ static int reader_seek_linear(struct table_iter *ti,
                 * we would not do a linear search there anymore.
                 */
                memset(&next.br.block, 0, sizeof(next.br.block));
+               next.br.uncompressed_data = NULL;
+               next.br.uncompressed_cap = 0;
 
                err = table_iter_next_block(&next);
                if (err < 0)
@@ -599,25 +601,28 @@ static int reader_seek_internal(struct reftable_reader *r,
        struct reftable_reader_offsets *offs =
                reader_offsets_for(r, reftable_record_type(rec));
        uint64_t idx = offs->index_offset;
-       struct table_iter ti = TABLE_ITER_INIT;
-       int err = 0;
+       struct table_iter ti = TABLE_ITER_INIT, *p;
+       int err;
+
        if (idx > 0)
                return reader_seek_indexed(r, it, rec);
 
        err = reader_start(r, &ti, reftable_record_type(rec), 0);
        if (err < 0)
-               return err;
+               goto out;
+
        err = reader_seek_linear(&ti, rec);
        if (err < 0)
-               return err;
-       else {
-               struct table_iter *p =
-                       reftable_malloc(sizeof(struct table_iter));
-               *p = ti;
-               iterator_from_table_iter(it, p);
-       }
+               goto out;
 
-       return 0;
+       REFTABLE_ALLOC_ARRAY(p, 1);
+       *p = ti;
+       iterator_from_table_iter(it, p);
+
+out:
+       if (err)
+               table_iter_close(&ti);
+       return err;
 }
 
 static int reader_seek(struct reftable_reader *r, struct reftable_iterator *it,