// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>
/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 196 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 196 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Free-ing is always done into bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contains
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)
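/* Illustrative usage sketch (added for orientation, not part of the original
 * source). It shows the two init modes implemented below; struct my_elem is a
 * hypothetical caller-side type.
 *
 *	struct bpf_mem_alloc ma;
 *	void *elem, *obj;
 *
 *	// fixed-size mode (typical hash map element allocator):
 *	bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
 *	elem = bpf_mem_cache_alloc(&ma);
 *	bpf_mem_cache_free(&ma, elem);
 *
 *	// size-class mode (size == 0, bpf_dynptr/bpf_kptr style):
 *	bpf_mem_alloc_init(&ma, 0, false);
 *	obj = bpf_mem_alloc(&ma, 64);
 *	bpf_mem_free(&ma, obj);
 *
 *	bpf_mem_alloc_destroy(&ma);
 */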
/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3, 3, 4, 4, 5, 5, 5, 5,		/* 8..64 bytes */
	1, 1, 1, 1, 6, 6, 6, 6,		/* 72..128 bytes */
	2, 2, 2, 2, 2, 2, 2, 2,		/* 136..192 bytes */
};
static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
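/* Worked examples of the mapping above (for illustration): a 200-byte request
 * takes the fls() branch, fls(199) - 2 = 8 - 2 = 6, i.e. the 256-byte bucket;
 * 4096 bytes maps to fls(4095) - 2 = 10, the last (4096-byte) bucket; a
 * 100-byte request uses size_index[12] - 1 = 5, the 128-byte bucket.
 */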
#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}
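/* Layout note (descriptive, added for clarity): for per-cpu allocations the
 * returned object is a small kmalloc-ed pair of pointers. Slot 0 doubles as
 * the struct llist_node used by the freelists, slot 1 holds the pointer to
 * the per-cpu area of c->unit_size bytes. This matches free_one(), which does
 * free_percpu(((void **)obj)[1]), and percpu_size = LLIST_NODE_SZ +
 * sizeof(void *) in bpf_mem_alloc_init().
 */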
static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

	return root_mem_cgroup;
}
static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of bpf prog executing on this cpu
		 * when active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}
static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}
static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, &flags);
}
/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	gfp_t gfp;
	void *obj;
	int i;

	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, gfp);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}
static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}
static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}
static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}
static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}
static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}
static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}
static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, &flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}
static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}
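/* Descriptive note (added for clarity): objects freed via bpf_mem_free_rcu()
 * travel free_by_rcu (or free_llist_extra_rcu when 'active' is busy) ->
 * waiting_for_gp -> regular RCU GP via call_rcu_hurry() ->
 * tgt->free_by_rcu_ttrace -> waiting_for_gp_ttrace -> RCU tasks trace GP ->
 * kfree()/free_percpu(). Objects freed via bpf_mem_free() skip the first GP:
 * they sit in free_llist until the bucket exceeds its high watermark and
 * free_bulk() moves them to free_by_rcu_ttrace.
 */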
static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, &flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload just ask RCU to hurry up. There could be many
		 * objects in free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, &flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}
static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}
static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}
/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*196 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
static void init_refill_work(struct bpf_mem_cache *c)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
}
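/* Worked example of the heuristic above (illustrative): for unit_size <= 256
 * the bucket keeps watermarks 32/96 and batch = max((96 - 32) / 4 * 3, 1) = 48.
 * For unit_size == 4096 the watermarks become max(32 * 256 / 4096, 1) = 2 and
 * max(96 * 256 / 4096, 3) = 6, with batch = max((6 - 2) / 4 * 3, 1) = 3,
 * matching the order-0 comment above.
 */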
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}
static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
{
	struct llist_node *first;
	unsigned int obj_size;

	first = c->free_llist.first;
	if (!first)
		return 0;

	if (c->percpu_size)
		obj_size = pcpu_alloc_size(((void **)first)[1]);
	else
		obj_size = ksize(first);
	if (obj_size != c->unit_size) {
		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
			  idx, c->percpu_size, obj_size, c->unit_size);
		return -EINVAL;
	}

	return 0;
}
/* When size != 0 allocate a single bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	int cpu, i, err, unit_size, percpu_size = 0;
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;

	/* room for llist_node and per-cpu pointer */
	if (percpu)
		percpu_size = LLIST_NODE_SZ + sizeof(void *);

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (!percpu)
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;

			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
	err = 0;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;

			init_refill_work(c);
			/* Another bpf_mem_cache will be used when allocating
			 * c->unit_size in bpf_mem_alloc(), so doesn't prefill
			 * for the bpf_mem_cache because these free objects will
			 * never be used.
			 */
			if (i != bpf_mem_cache_idx(c->unit_size))
				continue;

			prefill_mem_cache(c, cpu);
			err = check_obj_size(c, i);
			if (err)
				goto out;
		}
	}

out:
	ma->caches = pcc;
	/* refill_work is either zeroed or initialized, so it is safe to
	 * call irq_work_sync().
	 */
	if (err)
		bpf_mem_alloc_destroy(ma);
	return err;
}
static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}
static void check_mem_cache(struct bpf_mem_cache *c)
{
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->free_llist));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
}
static void check_leaked_objs(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			check_mem_cache(c);
		}
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				check_mem_cache(c);
			}
		}
	}
}
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	check_leaked_objs(ma);
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}
static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}
static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}
static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory to be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}
/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	/* Enable IRQ after the enqueue of irq work completes, so irq work
	 * will run after IRQ is enabled and free_llist may be refilled by
	 * irq work before other task preempts current task.
	 */
	local_irq_restore(flags);

	return llnode;
}
/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
	/* Enable IRQ after irq_work_raise() completes, otherwise when current
	 * task is preempted by task which does unit_alloc(), unit_alloc() may
	 * return NULL unexpectedly because irq work is already pending but can
	 * not been triggered and free_llist can not be refilled timely.
	 */
	local_irq_restore(flags);
}
static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
	local_irq_restore(flags);
}
/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
{
	size_t size;

	if (percpu)
		size = pcpu_alloc_size(*((void **)ptr));
	else
		size = ksize(ptr - LLIST_NODE_SZ);
	return bpf_mem_cache_idx(size);
}
void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_free_idx(ptr, ma->percpu);
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}
void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_free_idx(ptr, ma->percpu);
	if (idx < 0)
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}
void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}
void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}
/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}
/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
 * actual size of dynamic per-cpu area will always be matched and there is
 * no need to adjust size_index for per-cpu allocation. However for the
 * simplicity of the implementation, use a unified size_index for both
 * kmalloc and per-cpu allocation.
 */
static __init int bpf_mem_cache_adjust_size(void)
{
	unsigned int size;

	/* Adjusting the indexes in size_index() according to the object_size
	 * of underlying slab cache, so bpf_mem_alloc() will select a
	 * bpf_mem_cache with unit_size equal to the object_size of
	 * the underlying slab cache.
	 *
	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
	 * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
	 */
	for (size = 192; size >= 8; size -= 8) {
		unsigned int kmalloc_size, index;

		kmalloc_size = kmalloc_size_roundup(size);
		if (kmalloc_size == size)
			continue;

		if (kmalloc_size <= 192)
			index = size_index[(kmalloc_size - 1) / 8];
		else
			index = fls(kmalloc_size - 1) - 1;
		/* Only overwrite if necessary */
		if (size_index[(size - 1) / 8] != index)
			size_index[(size - 1) / 8] = index;
	}

	return 0;
}
subsys_initcall(bpf_mem_cache_adjust_size);