From foo@baz Fri Mar 9 14:18:36 PST 2018
From: Daniel Borkmann <daniel@iogearbox.net>
Date: Thu, 8 Mar 2018 13:14:40 +0100
Subject: bpf: fix mlock precharge on arraymaps
To: gregkh@linuxfoundation.org
Cc: ast@kernel.org, daniel@iogearbox.net, stable@vger.kernel.org, Dennis Zhou <dennisszhou@gmail.com>
Message-ID: <fd1e6c48bbd5e2a02420d5f09132e97d9331000a.1520504748.git.daniel@iogearbox.net>

From: Daniel Borkmann <daniel@iogearbox.net>

[ upstream commit 9c2d63b843a5c8a8d0559cc067b5398aa5ec3ffc ]

syzkaller recently triggered an OOM during percpu map allocation;
while there is work in progress by Dennis Zhou to add __GFP_NORETRY
semantics for the percpu allocator under pressure, there is also a
missing bpf_map_precharge_memlock() check in array map allocation.

Given that today the actual bpf_map_charge_memlock() happens after
find_and_alloc_map() in the syscall path, bpf_map_precharge_memlock()
is there to bail out early, before we go and do the map setup work,
when we would hit the limits anyway. Therefore, add this check for
array maps as well.

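For reference, a minimal sketch of the precharge helper this bail-out
relies on, modeled on the version introduced in 6c9059817432
(simplified here, so treat it as illustrative rather than the exact
tree state):

  int bpf_map_precharge_memlock(u32 pages)
  {
  	/* check the current user's locked_vm budget without charging it */
  	struct user_struct *user = get_current_user();
  	unsigned long memlock_limit, cur;

  	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  	cur = atomic_long_read(&user->locked_vm);
  	free_uid(user);
  	if (cur + pages > memlock_limit)
  		return -EPERM;
  	return 0;
  }

Note that it only checks the limit; the actual charge still happens
later via bpf_map_charge_memlock() once the map object exists, so the
precharge merely avoids doing the expensive setup work when the limit
would be exceeded anyway.
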
Fixes: 6c9059817432 ("bpf: pre-allocate hash map elements")
Fixes: a10423b87a7e ("bpf: introduce BPF_MAP_TYPE_PERCPU_ARRAY map")
Reported-by: syzbot+adb03f3f0bb57ce3acda@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dennis Zhou <dennisszhou@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/bpf/arraymap.c |   28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -49,11 +49,11 @@ static int bpf_array_alloc_percpu(struct
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -97,8 +97,19 @@ static struct bpf_map *array_map_alloc(u
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -114,20 +125,13 @@ static struct bpf_map *array_map_alloc(u
 	array->map.max_entries = attr->max_entries;
 	array->map.map_flags = attr->map_flags;
 	array->map.numa_node = numa_node;
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
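
As a worked example of what the precharge now covers, take a
BPF_MAP_TYPE_PERCPU_ARRAY with max_entries = 1024 and value_size = 8
on a machine with 4 possible CPUs and 4 KiB pages (all numbers are
illustrative, with sizeof(struct bpf_array) taken as ~300 bytes):

  elem_size  = round_up(8, 8)                           =     8 bytes
  array_size = sizeof(struct bpf_array) + 1024 * 8      ≈  8492 bytes
  cost       = array_size + 1024 * 8 * 4                ≈ 41260 bytes
  pages      = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT  =    11 pages

Those 11 pages are checked against RLIMIT_MEMLOCK up front, whereas
before this patch the percpu portion was only accounted after
bpf_array_alloc_percpu() had already allocated it.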