git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: htab: extract helper for freeing special structs
author: Mykyta Yatsenko <yatsenko@meta.com>
Tue, 23 Sep 2025 11:23:58 +0000 (12:23 +0100)
committer: Alexei Starovoitov <ast@kernel.org>
Tue, 23 Sep 2025 14:34:38 +0000 (07:34 -0700)
Extract the cleanup of known embedded structs into a dedicated helper.
Remove duplication and introduce a single source of truth for freeing
special embedded structs in hashtab.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250923112404.668720-4-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/hashtab.c

index 71f9931ac64cd4f81901d6d585e3c92fb518f18e..2319f8f8fa3e982ed66018e25b032f9d51ec08c1 100644 (file)
@@ -215,6 +215,16 @@ static bool htab_has_extra_elems(struct bpf_htab *htab)
        return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
 }
 
+static void htab_free_internal_structs(struct bpf_htab *htab, struct htab_elem *elem)
+{
+       if (btf_record_has_field(htab->map.record, BPF_TIMER))
+               bpf_obj_free_timer(htab->map.record,
+                                  htab_elem_value(elem, htab->map.key_size));
+       if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
+               bpf_obj_free_workqueue(htab->map.record,
+                                      htab_elem_value(elem, htab->map.key_size));
+}
+
 static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 {
        u32 num_entries = htab->map.max_entries;
@@ -227,12 +237,7 @@ static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
                struct htab_elem *elem;
 
                elem = get_htab_elem(htab, i);
-               if (btf_record_has_field(htab->map.record, BPF_TIMER))
-                       bpf_obj_free_timer(htab->map.record,
-                                          htab_elem_value(elem, htab->map.key_size));
-               if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-                       bpf_obj_free_workqueue(htab->map.record,
-                                              htab_elem_value(elem, htab->map.key_size));
+               htab_free_internal_structs(htab, elem);
                cond_resched();
        }
 }
@@ -1502,12 +1507,7 @@ static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
 
                hlist_nulls_for_each_entry(l, n, head, hash_node) {
                        /* We only free timer on uref dropping to zero */
-                       if (btf_record_has_field(htab->map.record, BPF_TIMER))
-                               bpf_obj_free_timer(htab->map.record,
-                                                  htab_elem_value(l, htab->map.key_size));
-                       if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-                               bpf_obj_free_workqueue(htab->map.record,
-                                                      htab_elem_value(l, htab->map.key_size));
+                       htab_free_internal_structs(htab, l);
                }
                cond_resched_rcu();
        }