From: Vsevolod Stakhov
Date: Tue, 12 May 2026 15:57:40 +0000 (+0100)
Subject: [Feature] memstat: per-callsite mempool counters and structured jemalloc
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d32a6dcba041977e015d3abc8ed7d7a2697a57b6;p=thirdparty%2Frspamd.git

[Feature] memstat: per-callsite mempool counters and structured jemalloc

Track lifetime pools/chunks/bytes counters per mempool callsite and expose
them via rspamd_mempool_entry_stat_t. memory_stat now emits per-arena
jemalloc stats instead of the raw malloc_stats_print dump. The rspamadm
control memstat renderer gains --compact and --only modes, sortable
callsite columns (cur/total bytes and pools), and prints just the callsite
filename.
---

diff --git a/lualib/rspamadm/memstat.lua b/lualib/rspamadm/memstat.lua
index f7af789c97..270d931e06 100644
--- a/lualib/rspamadm/memstat.lua
+++ b/lualib/rspamadm/memstat.lua
@@ -17,6 +17,15 @@ limitations under the License.
 local rspamd_util = require "rspamd_util"
 local argparse = require "argparse"
 
+local KNOWN_SUBSYSTEMS = {
+  summary = true,
+  process = true,
+  mempool = true,
+  callsites = true,
+  lua = true,
+  jemalloc = true,
+}
+
 local parser = argparse()
     :name "rspamadm control memstat"
     :description "Show memory usage statistics across all workers"
@@ -25,6 +34,10 @@ parser:flag "-n --number"
       :description "Disable numbers humanization"
 parser:flag "-s --short"
       :description "Short output: only the per-worker summary table"
+parser:flag "-c --compact"
+      :description "Compact output: one line per worker per section"
+parser:option "--only"
+      :description "Comma-separated subsystems to show: summary,process,mempool,callsites,lua,jemalloc"
 parser:option "--top"
       :description "Show top-N mempool callsites per worker (default 20)"
       :convert(tonumber)
@@ -39,6 +52,16 @@ parser:flag "--no-lua"
       :description "Skip Lua heap section"
 parser:flag "--no-jemalloc"
       :description "Skip jemalloc section"
+parser:option "--callsite-sort"
+      :description "Sort callsites by: suggestion, cur_bytes, total_bytes, cur_pools, total_pools (default cur_bytes)"
+      :convert {
+        suggestion = "suggestion",
+        cur_bytes = "cur_bytes",
+        total_bytes = "total_bytes",
+        cur_pools = "cur_pools",
+        total_pools = "total_pools",
+      }
+      :default("cur_bytes")
 parser:option "--sort"
       :description "Sort summary table by: rss, lua, mempool, jemalloc, pid (default pid)"
       :convert {
@@ -97,6 +120,46 @@ local function summary_sorter(workers, mode)
   end
 end
 
+local function build_subsystems_filter(opts)
+  -- Returns a table keyed by subsystem name with boolean values telling
+  -- whether to show that subsystem. --only takes precedence; otherwise
+  -- everything is on except sections turned off via --no-*. --short
+  -- collapses the output to the summary table only.
+  local enabled = {
+    summary = true, process = true, mempool = true,
+    callsites = true, lua = true, jemalloc = true,
+  }
+  if opts.only and #opts.only > 0 then
+    for k in pairs(enabled) do
+      enabled[k] = false
+    end
+    for token in string.gmatch(opts.only, "[^,%s]+") do
+      local name = token:lower()
+      if KNOWN_SUBSYSTEMS[name] then
+        enabled[name] = true
+      else
+        io.stderr:write(string.format(
+          "warning: unknown subsystem '%s' in --only (ignored)\n", token))
+      end
+    end
+    -- --only is authoritative: only the subsystems listed there are shown,
+    -- and that includes the summary table itself.
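+    -- e.g. `--only mempool,callsites` yields:
+    --   { summary = false, process = false, mempool = true,
+    --     callsites = true, lua = false, jemalloc = false }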
+    return enabled
+  end
+  if opts.short then
+    for k in pairs(enabled) do
+      enabled[k] = (k == "summary")
+    end
+    return enabled
+  end
+  if opts.no_process then enabled.process = false end
+  if opts.no_mempool then enabled.mempool = false end
+  if opts.no_callsites then enabled.callsites = false end
+  if opts.no_lua then enabled.lua = false end
+  if opts.no_jemalloc then enabled.jemalloc = false end
+  return enabled
+end
+
 local function print_summary(workers, total, opts)
   print("Memory usage by worker:")
   print("")
@@ -127,6 +190,23 @@ local function print_summary(workers, total, opts)
   print("")
 end
 
+-- Process memory keys we care about, in render order
+local PROC_KEYS = {
+  "vm_size", "vm_rss", "rss_anon", "rss_file", "rss_shmem",
+  "vm_data", "vm_stack", "vm_text", "vm_lib", "vm_pte",
+}
+
+local function format_kv_line(t, keys, opts)
+  local parts = {}
+  for _, k in ipairs(keys) do
+    local v = t[k]
+    if v and v > 0 then
+      table.insert(parts, string.format("%s=%s", k, bytes(v, opts.number)))
+    end
+  end
+  return table.concat(parts, " ")
+end
+
 local function print_process(workers, opts)
   local any = false
   for _, pid in ipairs(sorted_keys(workers, pid_sort)) do
@@ -137,26 +217,13 @@ local function print_process(workers, opts)
         print("Process memory:")
         any = true
       end
-      print(string.format("  %s (%s):", pid, w.type or "?"))
-      local fields = {
-        { "vm_size", proc.vm_size },
-        { "vm_rss", proc.vm_rss },
-        { "rss_anon", proc.rss_anon },
-        { "rss_file", proc.rss_file },
-        { "rss_shmem", proc.rss_shmem },
-        { "vm_data", proc.vm_data },
-        { "vm_stack", proc.vm_stack },
-        { "vm_text", proc.vm_text },
-        { "vm_lib", proc.vm_lib },
-        { "vm_pte", proc.vm_pte },
-      }
-      local parts = {}
-      for _, kv in ipairs(fields) do
-        if kv[2] and kv[2] > 0 then
-          table.insert(parts, string.format("%s=%s", kv[1], bytes(kv[2], opts.number)))
-        end
+      if opts.compact then
+        print(string.format("  %-7s %-13s %s",
+          pid, w.type or "?", format_kv_line(proc, PROC_KEYS, opts)))
+      else
+        print(string.format("  %s (%s):", pid, w.type or "?"))
+        print("    " .. format_kv_line(proc, PROC_KEYS, opts))
       end
-      print("    " .. table.concat(parts, " "))
     end
   end
   if any then
@@ -175,15 +242,20 @@ local function print_mempool_aggregate(workers, opts)
         any = true
       end
       local a = mp.aggregate
-      print(string.format("  %s (%s):", pid, w.type or "?"))
-      print(string.format(
-        "    pools=%d/%d bytes=%s chunks=%d/%d shared=%d oversized=%d fragmented=%s",
+      local line = string.format(
+        "pools=%d/%d bytes=%s chunks=%d/%d shared=%d oversized=%d frag=%s",
         a.pools_allocated or 0, a.pools_freed or 0,
         bytes(a.bytes_allocated or 0, opts.number),
         a.chunks_allocated or 0, a.chunks_freed or 0,
         a.shared_chunks_allocated or 0, a.oversized_chunks or 0,
-        bytes(a.fragmented_size or 0, opts.number)))
+        bytes(a.fragmented_size or 0, opts.number))
+      if opts.compact then
+        print(string.format("  %-7s %-13s %s", pid, w.type or "?", line))
+      else
+        print(string.format("  %s (%s):", pid, w.type or "?"))
+        print("    " .. line)
+      end
     end
   end
   if any then
@@ -191,33 +263,82 @@ local function print_mempool_aggregate(workers, opts)
   end
 end
 
+local function callsite_basename(src)
+  if not src then return "?" end
+  -- Strip the directory portion: "src/libserver/foo.c:123" -> "foo.c:123".
+  -- The trailing "file:line" part never contains '/', so the last path
+  -- segment is always the file name plus line number.
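+  -- The "([^/]+)$" pattern captures the trailing run without '/', so
+  -- inputs with no directory at all come back unchanged, e.g.
+  --   callsite_basename("foo.c:12") -> "foo.c:12"
+  --   callsite_basename(nil)        -> "?"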
+  local tail = string.match(src, "([^/]+)$")
+  return tail or src
+end
+
+local function callsite_key(e, mode)
+  if mode == "suggestion" then
+    return e.cur_suggestion or 0
+  elseif mode == "total_bytes" then
+    return e.bytes_allocated_total or 0
+  elseif mode == "cur_pools" then
+    return (e.pools_allocated or 0) - (e.pools_freed or 0)
+  elseif mode == "total_pools" then
+    return e.pools_allocated or 0
+  end
+  -- default: cur_bytes
+  return e.bytes_currently_used or 0
+end
+
 local function print_callsites(workers, opts)
   local limit = opts.top or 20
+  local sort_mode = opts.callsite_sort or "cur_bytes"
   local any = false
   for _, pid in ipairs(sorted_keys(workers, pid_sort)) do
     local w = workers[pid]
     local entries = w.data and w.data.mempool and w.data.mempool.entries
     if entries and #entries > 0 then
       if not any then
-        print(string.format("Top %d mempool callsites by suggestion:", limit))
+        print(string.format("Top %d mempool callsites by %s:", limit, sort_mode))
         any = true
       end
       table.sort(entries, function(a, b)
-        return (a.cur_suggestion or 0) > (b.cur_suggestion or 0)
+        return callsite_key(a, sort_mode) > callsite_key(b, sort_mode)
       end)
       print(string.format("  %s (%s):", pid, w.type or "?"))
-      for i = 1, math.min(limit, #entries) do
-        local e = entries[i]
+      if opts.compact then
+        print(string.format("    %-32s %10s %10s %8s %8s %10s",
+          "callsite", "cur_bytes", "tot_bytes", "cur_p", "tot_p", "suggest"))
+        for i = 1, math.min(limit, #entries) do
+          local e = entries[i]
+          local cur_pools = (e.pools_allocated or 0) - (e.pools_freed or 0)
+          print(string.format("    %-32s %10s %10s %8d %8d %10s",
+            callsite_basename(e.src),
+            bytes(e.bytes_currently_used or 0, opts.number),
+            bytes(e.bytes_allocated_total or 0, opts.number),
+            cur_pools,
+            e.pools_allocated or 0,
+            bytes(e.cur_suggestion or 0, opts.number)))
+        end
+      else
         print(string.format(
-          "    [%-9s] %-50s elts=%-4d vars=%-4d dtors=%-4d avg_frag=%-9s avg_left=%-9s n=%d",
-          bytes(e.cur_suggestion or 0, opts.number),
-          e.src or "?",
-          e.cur_elts or 0,
-          e.cur_vars or 0,
-          e.cur_dtors or 0,
-          bytes(e.avg_fragmentation or 0, opts.number),
-          bytes(e.avg_leftover or 0, opts.number),
-          e.samples or 0))
+          "    %-32s %10s %10s %8s %8s %10s %5s %5s %5s %10s %10s %5s",
+          "callsite", "cur_bytes", "tot_bytes", "cur_p", "tot_p", "suggest",
+          "elts", "vars", "dtors", "avg_frag", "avg_left", "n"))
+        for i = 1, math.min(limit, #entries) do
+          local e = entries[i]
+          local cur_pools = (e.pools_allocated or 0) - (e.pools_freed or 0)
+          print(string.format(
+            "    %-32s %10s %10s %8d %8d %10s %5d %5d %5d %10s %10s %5d",
+            callsite_basename(e.src),
+            bytes(e.bytes_currently_used or 0, opts.number),
+            bytes(e.bytes_allocated_total or 0, opts.number),
+            cur_pools,
+            e.pools_allocated or 0,
+            bytes(e.cur_suggestion or 0, opts.number),
+            e.cur_elts or 0,
+            e.cur_vars or 0,
+            e.cur_dtors or 0,
+            bytes(e.avg_fragmentation or 0, opts.number),
+            bytes(e.avg_leftover or 0, opts.number),
+            e.samples or 0))
+        end
       end
     end
   end
@@ -236,7 +357,7 @@ local function print_lua(workers, opts)
       print("Lua heap:")
       any = true
     end
-    print(string.format("  %s (%s): %s",
+    print(string.format("  %-7s %-13s %s",
       pid, w.type or "?",
       bytes(lua.used_bytes or 0, opts.number)))
   end
@@ -246,6 +367,10 @@ local function print_lua(workers, opts)
   end
 end
 
+local JEMALLOC_STATS_KEYS = {
+  "allocated", "active", "metadata", "resident", "mapped", "retained",
+}
+
 local function print_jemalloc(workers, opts)
   local any = false
   for _, pid in ipairs(sorted_keys(workers, pid_sort)) do
@@ -256,34 +381,66 @@ local function print_jemalloc(workers, opts)
       print("Jemalloc:")
       any = true
     end
-    print(string.format("  %s (%s):", pid, w.type or "?"))
-    if j.stats then
-      local parts = {}
-      for _, k in ipairs({ "allocated", "active", "metadata", "resident", "mapped", "retained" }) do
-        if j.stats[k] then
-          table.insert(parts, string.format("%s=%s", k, bytes(j.stats[k], opts.number)))
+
+    if opts.compact then
+      local s = j.stats or {}
+      local arenas_count = j.arenas and #j.arenas or 0
+      print(string.format("  %-7s %-13s alloc=%s active=%s mapped=%s resident=%s retained=%s arenas=%d",
+        pid, w.type or "?",
+        bytes(s.allocated or 0, opts.number),
+        bytes(s.active or 0, opts.number),
+        bytes(s.mapped or 0, opts.number),
+        bytes(s.resident or 0, opts.number),
+        bytes(s.retained or 0, opts.number),
+        arenas_count))
+    else
+      print(string.format("  %s (%s):", pid, w.type or "?"))
+
+      if j.config then
+        local cfg_parts = {}
+        if j.config.version then
+          table.insert(cfg_parts, string.format("version=%s", tostring(j.config.version)))
+        end
+        if j.config.narenas then
+          table.insert(cfg_parts, string.format("narenas=%d", j.config.narenas))
+        end
+        if j.config.page_size then
+          table.insert(cfg_parts, string.format("page=%s",
+            bytes(j.config.page_size, opts.number)))
+        end
+        if j.config.dirty_decay_ms ~= nil then
+          table.insert(cfg_parts, string.format("dirty_decay=%dms", j.config.dirty_decay_ms))
+        end
+        if j.config.muzzy_decay_ms ~= nil then
+          table.insert(cfg_parts, string.format("muzzy_decay=%dms", j.config.muzzy_decay_ms))
+        end
+        if #cfg_parts > 0 then
+          print("    config: " .. table.concat(cfg_parts, " "))
         end
       end
-      if #parts > 0 then
-        print("    " .. table.concat(parts, " "))
-      end
-    end
-    if j.config then
-      local parts = {}
-      for k, v in pairs(j.config) do
-        table.insert(parts, string.format("%s=%s", k, tostring(v)))
-      end
-      table.sort(parts)
-      if #parts > 0 then
-        print("    config: " .. table.concat(parts, " "))
+
+      if j.stats then
+        print("    totals: " .. format_kv_line(j.stats, JEMALLOC_STATS_KEYS, opts))
       end
-    end
-    if j.text and #j.text > 0 then
-      print("    --- malloc_stats_print ---")
-      for line in tostring(j.text):gmatch("[^\r\n]+") do
-        print("      " .. line)
+
+      if j.arenas and #j.arenas > 0 then
+        print(string.format("    %4s %10s %10s %10s %10s %10s %10s %10s %10s %5s",
+          "id", "alloc", "small", "large", "mapped", "retained",
+          "resident", "dirty", "muzzy", "thr"))
+        for _, a in ipairs(j.arenas) do
+          print(string.format("    %4d %10s %10s %10s %10s %10s %10s %10s %10s %5d",
+            a.id or 0,
+            bytes(a.allocated or 0, opts.number),
+            bytes(a.small_allocated or 0, opts.number),
+            bytes(a.large_allocated or 0, opts.number),
+            bytes(a.mapped or 0, opts.number),
+            bytes(a.retained or 0, opts.number),
+            bytes(a.resident or 0, opts.number),
+            bytes(a.dirty or 0, opts.number),
+            bytes(a.muzzy or 0, opts.number),
+            a.nthreads or 0))
+        end
       end
-      print("    --- end ---")
     end
   end
 end
@@ -296,26 +453,24 @@ return function(args, res)
   local opts = parser:parse(args or {})
   local workers = res and res.workers or {}
   local total = res and res.total
+  local enabled = build_subsystems_filter(opts)
 
-  print_summary(workers, total, opts)
-
-  if opts.short then
-    return
+  if enabled.summary then
+    print_summary(workers, total, opts)
   end
-
-  if not opts.no_process then
+  if enabled.process then
     print_process(workers, opts)
   end
-  if not opts.no_mempool then
+  if enabled.mempool then
     print_mempool_aggregate(workers, opts)
   end
-  if not opts.no_callsites then
+  if enabled.callsites then
     print_callsites(workers, opts)
   end
-  if not opts.no_lua then
+  if enabled.lua then
     print_lua(workers, opts)
   end
-  if not opts.no_jemalloc then
+  if enabled.jemalloc then
     print_jemalloc(workers, opts)
   end
 end
diff --git a/src/libserver/memory_stat.cxx b/src/libserver/memory_stat.cxx
index 3da54307ab..3fc25c8d34 100644
--- a/src/libserver/memory_stat.cxx
+++ b/src/libserver/memory_stat.cxx
@@ -143,6 +143,16 @@ emit_mempool_info(ucl_object_t *parent)
 					      "avg_leftover", 0, false);
 			ucl_object_insert_key(e, ucl_object_fromint(st->samples),
 					      "samples", 0, false);
+			ucl_object_insert_key(e, ucl_object_fromint(st->pools_allocated),
+					      "pools_allocated", 0, false);
+			ucl_object_insert_key(e, ucl_object_fromint(st->pools_freed),
+					      "pools_freed", 0, false);
+			ucl_object_insert_key(e, ucl_object_fromint(st->chunks_allocated),
+					      "chunks_allocated", 0, false);
+			ucl_object_insert_key(e, ucl_object_fromint(st->bytes_allocated_total),
+					      "bytes_allocated_total", 0, false);
+			ucl_object_insert_key(e, ucl_object_fromint(st->bytes_currently_used),
+					      "bytes_currently_used", 0, false);
 
 			ucl_array_append(c->array, e);
 		},
@@ -173,10 +183,50 @@ emit_lua_info(ucl_object_t *parent, struct rspamd_config *cfg)
 }
 
 #ifdef WITH_JEMALLOC
-void jemalloc_text_cb(void *ud, const char *msg)
+void emit_jemalloc_arena(ucl_object_t *arr, unsigned int idx)
 {
-	auto *out = static_cast<rspamd_fstring_t **>(ud);
-	rspamd_printf_fstring(out, "%s", msg);
+	char path[128];
+
+	auto get_size = [&](const char *suffix) -> size_t {
+		size_t v = 0;
+		rspamd_snprintf(path, sizeof(path), "stats.arenas.%ud.%s", idx, suffix);
+		size_t sz = sizeof(v);
+		if (mallctl(path, &v, &sz, nullptr, 0) != 0) {
+			return 0;
+		}
+		return v;
+	};
+
+	size_t allocated = get_size("small.allocated") + get_size("large.allocated");
+	size_t mapped = get_size("mapped");
+
+	/* Only report arenas that actually hold something */
+	if (allocated == 0 && mapped == 0) {
+		return;
+	}
+
+	auto *a = ucl_object_typed_new(UCL_OBJECT);
+	ucl_object_insert_key(a, ucl_object_fromint(idx), "id", 0, false);
+	ucl_object_insert_key(a, ucl_object_fromint(allocated), "allocated", 0, false);
+	ucl_object_insert_key(a, ucl_object_fromint(get_size("small.allocated")),
+			      "small_allocated", 0, false);
ucl_object_insert_key(a, ucl_object_fromint(get_size("large.allocated")), + "large_allocated", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(mapped), "mapped", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("retained")), + "retained", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("resident")), + "resident", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("pdirty") * (size_t) sysconf(_SC_PAGESIZE)), + "dirty", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("pmuzzy") * (size_t) sysconf(_SC_PAGESIZE)), + "muzzy", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("metadata.allocated")), + "metadata", 0, false); + ucl_object_insert_key(a, ucl_object_fromint(get_size("nthreads")), + "nthreads", 0, false); + + ucl_array_append(arr, a); } #endif @@ -217,16 +267,19 @@ emit_jemalloc_info(ucl_object_t *parent) ucl_object_insert_key(obj, stats, "stats", 0, false); auto *config = ucl_object_typed_new(UCL_OBJECT); - + unsigned int narenas = 0; { - unsigned int narenas = 0; size_t sz = sizeof(narenas); if (mallctl("opt.narenas", &narenas, &sz, nullptr, 0) == 0) { ucl_object_insert_key(config, ucl_object_fromint(narenas), "narenas", 0, false); } } - + { + size_t page_sz = (size_t) sysconf(_SC_PAGESIZE); + ucl_object_insert_key(config, ucl_object_fromint(page_sz), "page_size", + 0, false); + } { ssize_t v = 0; size_t sz = sizeof(v); @@ -239,18 +292,29 @@ emit_jemalloc_info(ucl_object_t *parent) 0, false); } } + { + const char *cfg_str = nullptr; + size_t sz = sizeof(cfg_str); + if (mallctl("version", &cfg_str, &sz, nullptr, 0) == 0 && cfg_str) { + ucl_object_insert_key(config, ucl_object_fromstring(cfg_str), "version", + 0, false); + } + } ucl_object_insert_key(obj, config, "config", 0, false); - /* Capture the human-readable summary as well */ - rspamd_fstring_t *text = rspamd_fstring_sized_new(4096); - malloc_stats_print(jemalloc_text_cb, &text, "Jmdablxe"); - if (text->len > 0) { - ucl_object_insert_key(obj, - ucl_object_fromlstring(text->str, text->len), - "text", 0, false); + /* + * Per-arena breakdown. We probe by index up to opt.narenas; arenas + * that have never been populated are skipped. 
+	 */
+	auto *arenas = ucl_object_typed_new(UCL_ARRAY);
+	if (narenas == 0) {
+		narenas = 32; /* sane upper bound when opt.narenas is unset */
+	}
+	for (unsigned int i = 0; i < narenas; i++) {
+		emit_jemalloc_arena(arenas, i);
 	}
-	rspamd_fstring_free(text);
+	ucl_object_insert_key(obj, arenas, "arenas", 0, false);
 
 	ucl_object_insert_key(parent, obj, "jemalloc", 0, false);
 #else
diff --git a/src/libutil/mem_pool.c b/src/libutil/mem_pool.c
index ab7e3b52bb..ca8a6befe9 100644
--- a/src/libutil/mem_pool.c
+++ b/src/libutil/mem_pool.c
@@ -470,6 +470,14 @@ rspamd_mempool_new_(gsize size, const char *tag, int flags, const char *loc)
 	g_atomic_int_add(&mem_pool_stat->chunks_allocated, 1);
 	g_atomic_int_add(&mem_pool_stat_local.chunks_allocated, 1);
 
+	/* Per-callsite live counters */
+	if (entry) {
+		entry->pools_allocated++;
+		entry->chunks_allocated++;
+		entry->bytes_allocated_total += size;
+		entry->bytes_currently_used += size;
+	}
+
 	return new_pool;
 }
 
@@ -566,6 +574,13 @@ memory_pool_alloc_common(rspamd_mempool_t *pool, gsize size, gsize alignment,
 			pool_type);
 	}
 
+	/* Per-callsite chunk accounting */
+	if (pool->priv->entry && new) {
+		pool->priv->entry->chunks_allocated++;
+		pool->priv->entry->bytes_allocated_total += new->slice_size;
+		pool->priv->entry->bytes_currently_used += new->slice_size;
+	}
+
 	/* Connect to pool subsystem */
 	rspamd_mempool_append_chain(pool, new, pool_type);
 	/* No need to align again, aligned by rspamd_mempool_chain_new */
@@ -998,6 +1013,8 @@ void rspamd_mempool_delete(rspamd_mempool_t *pool)
 		g_ptr_array_free(pool->priv->trash_stack, TRUE);
 	}
 
+	uint64_t freed_bytes = 0;
+
 	for (i = 0; i < G_N_ELEMENTS(pool->priv->pools); i++) {
 		if (pool->priv->pools[i]) {
 			LL_FOREACH_SAFE(pool->priv->pools[i], cur, tmp)
@@ -1009,6 +1026,8 @@ void rspamd_mempool_delete(rspamd_mempool_t *pool)
 				g_atomic_int_add(&mem_pool_stat->chunks_allocated, -1);
 				g_atomic_int_add(&mem_pool_stat_local.chunks_allocated, -1);
 
+				freed_bytes += cur->slice_size;
+
 				len = cur->slice_size + sizeof(struct _pool_chain);
 
 				if (i == RSPAMD_MEMPOOL_SHARED) {
@@ -1024,6 +1043,16 @@ void rspamd_mempool_delete(rspamd_mempool_t *pool)
 		}
 	}
 
+	if (pool->priv->entry && mempool_entries) {
+		pool->priv->entry->pools_freed++;
+		if (pool->priv->entry->bytes_currently_used >= freed_bytes) {
+			pool->priv->entry->bytes_currently_used -= freed_bytes;
+		}
+		else {
+			pool->priv->entry->bytes_currently_used = 0;
+		}
+	}
+
 	g_atomic_int_inc(&mem_pool_stat->pools_freed);
 	g_atomic_int_inc(&mem_pool_stat_local.pools_freed);
 	POOL_MTX_UNLOCK();
@@ -1102,6 +1131,11 @@ void rspamd_mempool_entries_foreach(rspamd_mempool_entry_cb cb, void *ud)
 		st.samples = valid;
 		st.avg_fragmentation = valid ? (uint32_t) (sum_frag / valid) : 0;
 		st.avg_leftover = valid ? (uint32_t) (sum_left / valid) : 0;
+		st.pools_allocated = elt->pools_allocated;
+		st.pools_freed = elt->pools_freed;
+		st.chunks_allocated = elt->chunks_allocated;
+		st.bytes_allocated_total = elt->bytes_allocated_total;
+		st.bytes_currently_used = elt->bytes_currently_used;
 
 		cb(&st, ud);
 	}
diff --git a/src/libutil/mem_pool.h b/src/libutil/mem_pool.h
index 3dbe89152e..c13999b7eb 100644
--- a/src/libutil/mem_pool.h
+++ b/src/libutil/mem_pool.h
@@ -397,6 +397,11 @@ typedef struct rspamd_mempool_entry_stat_s {
 	uint32_t avg_fragmentation; /**< average fragmentation across history */
 	uint32_t avg_leftover;      /**< average leftover across history */
 	uint32_t samples;           /**< number of valid samples used for averages */
+	uint64_t pools_allocated;       /**< lifetime: pools allocated at this callsite */
+	uint64_t pools_freed;           /**< lifetime: pools freed at this callsite */
+	uint64_t chunks_allocated;      /**< lifetime: chunks allocated at this callsite */
+	uint64_t bytes_allocated_total; /**< lifetime: bytes allocated for chains at this callsite */
+	uint64_t bytes_currently_used;  /**< current bytes held by live pool chains at this callsite */
 } rspamd_mempool_entry_stat_t;
 
 typedef void (*rspamd_mempool_entry_cb)(const rspamd_mempool_entry_stat_t *stat,
diff --git a/src/libutil/mem_pool_internal.h b/src/libutil/mem_pool_internal.h
index 7d6c9889de..e63da07230 100644
--- a/src/libutil/mem_pool_internal.h
+++ b/src/libutil/mem_pool_internal.h
@@ -47,6 +47,12 @@ struct rspamd_mempool_entry_point {
 	uint32_t cur_vars;
 	uint32_t cur_dtors; /**< suggested number of destructors to preallocate */
 	struct entry_elt elts[ENTRY_NELTS];
+	/* Live counters for per-callsite reporting */
+	uint64_t pools_allocated;       /**< lifetime: pools created at this callsite */
+	uint64_t pools_freed;           /**< lifetime: pools deleted at this callsite */
+	uint64_t chunks_allocated;      /**< lifetime: chunks (initial + extra) allocated */
+	uint64_t bytes_allocated_total; /**< lifetime: bytes allocated for chains */
+	uint64_t bytes_currently_used;  /**< bytes currently held by live chains */
 };
 
 /**
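
A minimal consumer sketch for the counters added above (illustrative only:
the callback name and output format are hypothetical and the include path
depends on the build; the struct fields and the
rspamd_mempool_entries_foreach() entry point are the ones added in this
diff):

    #include <inttypes.h>
    #include <stdio.h>
    #include "mem_pool.h"

    /* Hypothetical callback: dump lifetime/live counters per callsite */
    static void
    dump_entry_cb(const rspamd_mempool_entry_stat_t *st, void *ud)
    {
        (void) ud;
        printf("pools=%" PRIu64 "/%" PRIu64 " chunks=%" PRIu64
               " cur=%" PRIu64 " total=%" PRIu64 "\n",
               st->pools_allocated, st->pools_freed,
               st->chunks_allocated,
               st->bytes_currently_used, st->bytes_allocated_total);
    }

    /* e.g. from a worker: rspamd_mempool_entries_foreach(dump_entry_cb, NULL); */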