From: David Wang <00107082@163.com>
Date: Mon, 9 Jun 2025 06:44:08 +0000 (+0800)
Subject: alloc_tag: keep codetag iterator active between read()
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9f44df50fee4d2f6cb374177244ccfa9f0a5cc95;p=thirdparty%2Fkernel%2Flinux.git

alloc_tag: keep codetag iterator active between read()

When reading /proc/allocinfo, seq_file invokes its start/stop callbacks
for every read() syscall.  In the start callback, memory is allocated to
hold the iterator, and the iterator then walks linearly from the
beginning of the codetag list up to the current read position.

A read() of a seq_file returns at most 4096 bytes per call, even when a
much larger user-space buffer is passed in, so reading all of
/proc/allocinfo takes tens of read() syscalls.  For example, a
306036-byte allocinfo file needs 76 reads:

  $ sudo cat /proc/allocinfo | wc
     3964   16678  306036
  $ sudo strace -T -e read cat /proc/allocinfo
  ...
  read(3, "        4096        1 arch/x86/k"..., 131072) = 4063 <0.000062>
  ...
  read(3, "           0        0 sound/core"..., 131072) = 4021 <0.000150>
  ...

For those n=3964 lines, each read() returns about m=3964/76=52 lines.
Because the iterator is restarted from the beginning on every read(), it
has to move forward
  m steps on the 1st read
  2*m steps on the 2nd read
  3*m steps on the 3rd read
  ...
  n steps on the last read

As reads progress, these linear seeks make each read() call slower than
the last.  Summed over all reads, the codetag iterator moves about
O(n*n/m) steps (see the worked figures below), so data structure
traversal takes up a significant part of the whole read.  Profiling a
stress read of /proc/allocinfo confirms this:

  vfs_read(99.959% 1677299/1677995)
      proc_reg_read_iter(99.856% 1674881/1677299)
          seq_read_iter(99.959% 1674191/1674881)
              allocinfo_start(75.664% 1266755/1674191)
                  codetag_next_ct(79.217% 1003487/1266755) <---
                  srso_return_thunk(1.264% 16011/1266755)
                  __kmalloc_cache_noprof(0.102% 1296/1266755)
                  ...
              allocinfo_show(21.287% 356378/1674191)
              allocinfo_next(1.530% 25621/1674191)

codetag_next_ct() takes the major part.

Private data allocated at open() time can instead keep the iterator
alive across read() calls, avoiding the per-read() memory allocation and
iterator reset.  With that change, only a single allocation at open()
and O(n) iterator steps in total are needed, and `time` shows the read
time improving from ~7ms to ~4ms.

Profiling with the change:

  vfs_read(99.865% 1581073/1583214)
      proc_reg_read_iter(99.485% 1572934/1581073)
          seq_read_iter(99.846% 1570519/1572934)
              allocinfo_show(87.428% 1373074/1570519)
                  seq_buf_printf(83.695% 1149196/1373074)
                  seq_buf_putc(1.917% 26321/1373074)
                  _find_next_bit(1.531% 21023/1373074)
                  ...
                  codetag_to_text(0.490% 6727/1373074)
                  ...
              allocinfo_next(6.275% 98543/1570519)
              ...
              allocinfo_start(0.369% 5790/1570519)
              ...

Now seq_buf_printf() takes the major part.
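To make the O(n*n/m) estimate concrete, here is a back-of-the-envelope
sum using only the numbers quoted above (illustrative arithmetic, not a
separate measurement):

  before: m + 2*m + ... + 76*m = 52 * (76*77/2) ~= 152000 iterator steps
          ~= n*n/(2*m) for n=3964, m=52
  after:  n = 3964 iterator steps, a single pass over the codetag list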
Link: https://lkml.kernel.org/r/20250609064408.112783-1-00107082@163.com
Signed-off-by: David Wang <00107082@163.com>
Acked-by: Suren Baghdasaryan
Cc: Kent Overstreet
Cc: Tim Chen
Signed-off-by: Andrew Morton
---

diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 3a74d63a959e8..36f07dc950699 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -46,21 +46,16 @@ struct allocinfo_private {
 static void *allocinfo_start(struct seq_file *m, loff_t *pos)
 {
 	struct allocinfo_private *priv;
-	struct codetag *ct;
 	loff_t node = *pos;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	m->private = priv;
-	if (!priv)
-		return NULL;
-
-	priv->print_header = (node == 0);
+	priv = (struct allocinfo_private *)m->private;
 	codetag_lock_module_list(alloc_tag_cttype, true);
-	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
-	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
-		node--;
-
-	return ct ? priv : NULL;
+	if (node == 0) {
+		priv->print_header = true;
+		priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
+		codetag_next_ct(&priv->iter);
+	}
+	return priv->iter.ct ? priv : NULL;
 }
 
 static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
@@ -77,12 +72,7 @@ static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
 
 static void allocinfo_stop(struct seq_file *m, void *arg)
 {
-	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
-
-	if (priv) {
-		codetag_lock_module_list(alloc_tag_cttype, false);
-		kfree(priv);
-	}
+	codetag_lock_module_list(alloc_tag_cttype, false);
 }
 
 static void print_allocinfo_header(struct seq_buf *buf)
@@ -817,7 +807,8 @@ static int __init alloc_tag_init(void)
 		return 0;
 	}
 
-	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
+	if (!proc_create_seq_private(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op,
+				     sizeof(struct allocinfo_private), NULL)) {
 		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
 		shutdown_mem_profiling(false);
 		return -ENOMEM;
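
For readers less familiar with proc_create_seq_private(), the sketch
below shows the same pattern in isolation.  It is not part of the patch;
the /proc file name "seq_private_demo" and the demo_* identifiers are
made up for illustration.  seq_file zero-allocates state_size bytes of
per-open private data, the start callback only rewinds it when *pos is
0, and the state is freed automatically at close(), so stop() has
nothing to clean up.

/* Minimal sketch of per-open seq_file state via proc_create_seq_private(). */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct demo_iter {
	loff_t cursor;	/* survives across read() syscalls on one fd */
};

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* m->private was zero-allocated at open(); no kzalloc() per read() */
	struct demo_iter *it = m->private;

	if (*pos == 0)
		it->cursor = 0;	/* rewind only for a fresh read from offset 0 */
	return it->cursor < 10 ? it : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct demo_iter *it = v;

	++*pos;
	++it->cursor;
	return it->cursor < 10 ? it : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to free: seq_file releases m->private at close() */
}

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_iter *it = v;

	seq_printf(m, "entry %lld\n", it->cursor);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int __init demo_init(void)
{
	if (!proc_create_seq_private("seq_private_demo", 0400, NULL,
				     &demo_seq_ops,
				     sizeof(struct demo_iter), NULL))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seq_private_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("seq_file per-open private state demo");
MODULE_LICENSE("GPL");

Reading /proc/seq_private_demo in small chunks would then call
demo_start() once per read() without ever re-walking already-emitted
entries, which is the same behaviour the patch gives /proc/allocinfo.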