	return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
}
+static void htab_free_internal_structs(struct bpf_htab *htab, struct htab_elem *elem)
+{
+	if (btf_record_has_field(htab->map.record, BPF_TIMER))
+		bpf_obj_free_timer(htab->map.record,
+				   htab_elem_value(elem, htab->map.key_size));
+	if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
+		bpf_obj_free_workqueue(htab->map.record,
+				       htab_elem_value(elem, htab->map.key_size));
+}
+
static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (IS_ERR_OR_NULL(htab->map.record))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();
	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
-		if (btf_record_has_field(htab->map.record, BPF_TIMER))
-			bpf_obj_free_timer(htab->map.record,
-					   htab_elem_value(elem, htab->map.key_size));
-		if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-			bpf_obj_free_workqueue(htab->map.record,
-					       htab_elem_value(elem, htab->map.key_size));
+		htab_free_internal_structs(htab, elem);
		cond_resched();
	}
}
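
For context, the fields this helper releases are kernel-managed objects that a BPF program can embed in a hash map value. A minimal sketch of such a value from the program side, assuming a libbpf-style BPF object (the struct and map names here are hypothetical, not part of this patch):

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	/* A value embedding kernel-managed fields. The verifier records
	 * them as BPF_TIMER and BPF_WORKQUEUE in the map's btf_record,
	 * which is what btf_record_has_field() tests in the helper above.
	 */
	struct elem_val {
		struct bpf_timer t;
		struct bpf_wq wq;
		__u64 payload;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 128);
		__type(key, __u32);
		__type(value, struct elem_val);
	} work_map SEC(".maps");

Once the map's user references drop to zero, any armed timer or queued work still sitting in live values has to be cancelled and freed; both free paths in this patch now do that through the single helper.
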
		hlist_nulls_for_each_entry(l, n, head, hash_node) {
			/* We only free timer and workqueue on uref dropping to zero */
-			if (btf_record_has_field(htab->map.record, BPF_TIMER))
-				bpf_obj_free_timer(htab->map.record,
-						   htab_elem_value(l, htab->map.key_size));
-			if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-				bpf_obj_free_workqueue(htab->map.record,
-						       htab_elem_value(l, htab->map.key_size));
+			htab_free_internal_structs(htab, l);
		}
		cond_resched_rcu();
	}
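
Both call sites locate the value through htab_elem_value(). For reference, a sketch of that accessor under the element layout hashtab.c uses (an assumption based on context, since the definition is not part of this hunk): the key is stored inline in struct htab_elem and the value follows it at an 8-byte-aligned offset.

	/* Sketch: the value lives right after the inline key; the key size
	 * is rounded up to 8 bytes so the value stays 8-byte aligned.
	 */
	static void *htab_elem_value(struct htab_elem *l, u32 key_size)
	{
		return l->key + round_up(key_size, 8);
	}

Keeping that offset computation in one accessor is what lets the new htab_free_internal_structs() helper serve both the preallocated and the malloc'ed free paths unchanged.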