{
struct bpf_map *map = vmf->vma->vm_file->private_data;
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ struct mem_cgroup *new_memcg, *old_memcg;
struct page *page;
long kbase, kaddr;
unsigned long flags;
/* already have a page vmap-ed */
goto out;
+ bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
+
if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
/* User space requested to segfault when page is not allocated by bpf prog */
goto out_unlock_sigsegv;
goto out_unlock_sigsegv;
}
flush_vmap_cache(kaddr, PAGE_SIZE);
+ bpf_map_memcg_exit(old_memcg, new_memcg);
out:
page_ref_add(page, 1);
raw_res_spin_unlock_irqrestore(&arena->spinlock, flags);
vmf->page = page;
return 0;
out_unlock_sigsegv:
+ bpf_map_memcg_exit(old_memcg, new_memcg);
raw_res_spin_unlock_irqrestore(&arena->spinlock, flags);
return VM_FAULT_SIGSEGV;
}
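Note: the bpf_map_memcg_enter()/bpf_map_memcg_exit() helpers used throughout this diff are not defined in the hunks shown here; presumably they are introduced elsewhere in the series. As a rough sketch only, they could follow the existing bpf_map_kmalloc_node() pattern in kernel/bpf/syscall.c (bpf_map_get_memcg() plus set_active_memcg()); the names, signatures and exact semantics below are assumptions, not the actual implementation:

/* Sketch only: assumed helpers, modeled on bpf_map_kmalloc_node(). */
static void bpf_map_memcg_enter(const struct bpf_map *map,
				struct mem_cgroup **old_memcg,
				struct mem_cgroup **new_memcg)
{
	/* Make the map's memcg active so that subsequent __GFP_ACCOUNT
	 * allocations in this scope are charged to it.
	 */
	*new_memcg = bpf_map_get_memcg(map);
	*old_memcg = set_active_memcg(*new_memcg);
}

static void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
			       struct mem_cgroup *new_memcg)
{
	/* Restore the previously active memcg and drop the reference
	 * taken in bpf_map_memcg_enter().
	 */
	set_active_memcg(old_memcg);
	mem_cgroup_put(new_memcg);
}

Setting the active memcg by itself does not charge anything; the allocation must also carry __GFP_ACCOUNT, which is why the kmalloc_nolock() calls in the hunks below gain that flag in addition to the enter/exit scope.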
/* user_vm_end/start are fixed before bpf prog runs */
long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena);
+ struct mem_cgroup *new_memcg, *old_memcg;
struct apply_range_data data;
struct page **pages = NULL;
long remaining, mapped = 0;
return 0;
}
+ bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
/* Cap allocation size to KMALLOC_MAX_CACHE_SIZE so kmalloc_nolock() can succeed. */
alloc_pages = min(page_cnt, KMALLOC_MAX_CACHE_SIZE / sizeof(struct page *));
- pages = kmalloc_nolock(alloc_pages * sizeof(struct page *), 0, NUMA_NO_NODE);
- if (!pages)
+ pages = kmalloc_nolock(alloc_pages * sizeof(struct page *), __GFP_ACCOUNT, NUMA_NO_NODE);
+ if (!pages) {
+ bpf_map_memcg_exit(old_memcg, new_memcg);
return 0;
+ }
data.pages = pages;
if (raw_res_spin_lock_irqsave(&arena->spinlock, flags))
flush_vmap_cache(kern_vm_start + uaddr32, mapped << PAGE_SHIFT);
raw_res_spin_unlock_irqrestore(&arena->spinlock, flags);
kfree_nolock(pages);
+ bpf_map_memcg_exit(old_memcg, new_memcg);
return clear_lo32(arena->user_vm_start) + uaddr32;
out:
range_tree_set(&arena->rt, pgoff + mapped, page_cnt - mapped);
raw_res_spin_unlock_irqrestore(&arena->spinlock, flags);
out_free_pages:
kfree_nolock(pages);
+ bpf_map_memcg_exit(old_memcg, new_memcg);
return 0;
}
static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
{
+ struct mem_cgroup *new_memcg, *old_memcg;
u64 full_uaddr, uaddr_end;
long kaddr, pgoff;
struct page *page;
page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT;
pgoff = compute_pgoff(arena, uaddr);
+ bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
if (!sleepable)
goto defer;
zap_pages(arena, full_uaddr, 1);
__free_page(page);
}
+ bpf_map_memcg_exit(old_memcg, new_memcg);
return;
defer:
- s = kmalloc_nolock(sizeof(struct arena_free_span), 0, -1);
+ s = kmalloc_nolock(sizeof(struct arena_free_span), __GFP_ACCOUNT, NUMA_NO_NODE);
+ bpf_map_memcg_exit(old_memcg, new_memcg);
if (!s)
/*
* If allocation fails in non-sleepable context, pages are intentionally left
static int arena_reserve_pages(struct bpf_arena *arena, long uaddr, u32 page_cnt)
{
long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
+ struct mem_cgroup *new_memcg, *old_memcg;
unsigned long flags;
long pgoff;
int ret;
}
/* "Allocate" the region to prevent it from being allocated. */
+ bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
ret = range_tree_clear(&arena->rt, pgoff, page_cnt);
+ bpf_map_memcg_exit(old_memcg, new_memcg);
out:
raw_res_spin_unlock_irqrestore(&arena->spinlock, flags);
return ret;
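arena_reserve_pages() does not allocate pages itself, so the enter/exit pair around range_tree_clear() presumably exists because the range tree can allocate a node internally when an existing range has to be split. A hypothetical illustration of that interaction (range_node_alloc_sketch() is not a real kernel function; struct range_node is the node type from kernel/bpf/range_tree.c):

static struct range_node *range_node_alloc_sketch(void)
{
	/* With the map's memcg made active by bpf_map_memcg_enter(), a
	 * __GFP_ACCOUNT allocation like this one is charged to that memcg.
	 */
	return kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT, NUMA_NO_NODE);
}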
static void arena_free_worker(struct work_struct *work)
{
struct bpf_arena *arena = container_of(work, struct bpf_arena, free_work);
+ struct mem_cgroup *new_memcg, *old_memcg;
struct llist_node *list, *pos, *t;
struct arena_free_span *s;
u64 arena_vm_start, user_vm_start;
return;
}
+ bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
+
init_llist_head(&free_pages);
arena_vm_start = bpf_arena_get_kern_vm_start(arena);
user_vm_start = bpf_arena_get_user_vm_start(arena);
page = llist_entry(pos, struct page, pcp_llist);
__free_page(page);
}
+
+ bpf_map_memcg_exit(old_memcg, new_memcg);
}
static void arena_free_irq(struct irq_work *iw)