]> git.ipfire.org Git - thirdparty/linux.git/blame - tools/testing/selftests/bpf/prog_tests/mmap.c
Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / tools / testing / selftests / bpf / prog_tests / mmap.c
CommitLineData
5051b384
AN
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3#include <sys/mman.h>
dde53c1b 4#include "test_mmap.skel.h"
5051b384
AN
5
/* User-space mirror of the value layout of the skeleton's data map:
 * 512 entries of 4 u64s each (16KB total — presumably sized to span
 * multiple pages so partial-page mmap() manipulations below are
 * meaningful; exact page count depends on the system page size).
 */
struct map_data {
	__u64 val[512 * 4];
};
9
5051b384
AN
/* Round sz up to the nearest multiple of the system page size. */
static size_t roundup_page(size_t sz)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	size_t n_pages = (sz + page_size - 1) / page_size;

	return n_pages * page_size;
}
15
/* End-to-end test of mmap()-able BPF array maps. Validates:
 *  - initial zeroing of .bss and data_map views, and coherence between
 *    the direct mmap()-ed view and the skeleton's skel->bss view;
 *  - that bpf_map_freeze() is refused (EBUSY) while a writable mapping
 *    exists, and succeeds once only read-only mappings remain;
 *  - mprotect() transitions: R/W -> R/O allowed, R/O -> R/W or EXEC denied;
 *  - partial-page munmap() plus MAP_FIXED re-mapping of sub-ranges;
 *  - that an active mmap() holds a reference keeping the map alive after
 *    all FDs are gone, and that the map ID disappears after final munmap().
 */
void test_mmap(void)
{
	const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
	const size_t map_sz = roundup_page(sizeof(struct map_data));
	const int zero = 0, one = 1, two = 2, far = 1500;
	const long page_size = sysconf(_SC_PAGE_SIZE);
	int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
	struct bpf_map *data_map, *bss_map;
	void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
	struct test_mmap__bss *bss_data;
	struct bpf_map_info map_info;
	__u32 map_info_sz = sizeof(map_info);
	struct map_data *map_data;
	struct test_mmap *skel;
	__u64 val = 0;

	skel = test_mmap__open_and_load();
	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
		return;

	bss_map = skel->maps.bss;
	data_map = skel->maps.data_map;
	data_map_fd = bpf_map__fd(data_map);

	/* get map's ID, so we can look it up again after all FDs are closed */
	memset(&map_info, 0, map_info_sz);
	err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
	if (CHECK(err, "map_get_info", "failed %d\n", errno))
		goto cleanup;
	data_map_id = map_info.id;

	/* mmap BSS map */
	bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  bpf_map__fd(bss_map), 0);
	if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
		  ".bss mmap failed: %d\n", errno)) {
		bss_mmaped = NULL;
		goto cleanup;
	}
	/* map as R/W first */
	map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}

	bss_data = bss_mmaped;
	map_data = map_mmaped;

	/* everything should start out zero-initialized, through both the
	 * direct mapping and the skeleton's own view of .bss
	 */
	CHECK_FAIL(bss_data->in_val);
	CHECK_FAIL(bss_data->out_val);
	CHECK_FAIL(skel->bss->in_val);
	CHECK_FAIL(skel->bss->out_val);
	CHECK_FAIL(map_data->val[0]);
	CHECK_FAIL(map_data->val[1]);
	CHECK_FAIL(map_data->val[2]);
	CHECK_FAIL(map_data->val[far]);

	err = test_mmap__attach(skel);
	if (CHECK(err, "attach_raw_tp", "err %d\n", err))
		goto cleanup;

	bss_data->in_val = 123;
	val = 111;
	CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));

	/* give the attached BPF program a chance to run and propagate
	 * in_val into out_val and into the data map entries
	 */
	usleep(1);

	CHECK_FAIL(bss_data->in_val != 123);
	CHECK_FAIL(bss_data->out_val != 123);
	CHECK_FAIL(skel->bss->in_val != 123);
	CHECK_FAIL(skel->bss->out_val != 123);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 123);
	CHECK_FAIL(map_data->val[far] != 3 * 123);

	/* the same values must be visible through regular map syscalls too */
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
	CHECK_FAIL(val != 111);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
	CHECK_FAIL(val != 222);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
	CHECK_FAIL(val != 123);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
	CHECK_FAIL(val != 3 * 123);

	/* data_map freeze should fail due to R/W mmap() */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(!err || errno != EBUSY, "no_freeze",
		  "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
		goto cleanup;

	/* downgrading an existing R/W mapping to R/O must be allowed */
	err = mprotect(map_mmaped, map_sz, PROT_READ);
	if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
		goto cleanup;

	/* unmap R/W mapping */
	err = munmap(map_mmaped, map_sz);
	map_mmaped = NULL;
	if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
		goto cleanup;

	/* re-map as R/O now */
	map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map R/O mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}
	/* ...but upgrading an R/O mapping to writable or executable is not */
	err = mprotect(map_mmaped, map_sz, PROT_WRITE);
	if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
		goto cleanup;
	err = mprotect(map_mmaped, map_sz, PROT_EXEC);
	if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
		goto cleanup;
	map_data = map_mmaped;

	/* map/unmap in a loop to test ref counting */
	for (i = 0; i < 10; i++) {
		int flags = i % 2 ? PROT_READ : PROT_WRITE;
		void *p;

		p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
		if (CHECK_FAIL(p == MAP_FAILED))
			goto cleanup;
		err = munmap(p, map_sz);
		if (CHECK_FAIL(err))
			goto cleanup;
	}

	/* data_map freeze should now succeed due to no R/W mapping */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
		  err, errno))
		goto cleanup;

	/* mapping as R/W now should fail */
	tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    data_map_fd, 0);
	if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

	/* frozen map is read-only from user space only; the BPF program
	 * can still update it, so new values must show up everywhere
	 */
	bss_data->in_val = 321;
	usleep(1);
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	/* check some more advanced mmap() manipulations */

	/* map all but last page: pages 1-3 mapped */
	tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
		    data_map_fd, 0);
	if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
		goto cleanup;

	/* unmap second page: pages 1, 3 mapped */
	err = munmap(tmp1 + page_size, page_size);
	if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

	/* map page 2 back; MAP_FIXED places it exactly into the hole */
	tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
		    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
		/* pages 1 and 3 are still mapped and need separate unmaps */
		munmap(tmp1, page_size);
		munmap(tmp1 + 2*page_size, page_size);
		goto cleanup;
	}
	CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
	      "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	/* re-map all 4 pages */
	tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
		    data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
		munmap(tmp1, 3 * page_size); /* unmap page 1 */
		goto cleanup;
	}
	CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	/* the full re-mapping must see the same contents as before */
	map_data = tmp2;
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	munmap(tmp2, 4 * page_size);

	/* map all 4 pages, but with pg_off=1 page, should fail */
	tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
		    data_map_fd, page_size /* initial page shift */);
	if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
		munmap(tmp1, 4 * page_size);
		goto cleanup;
	}

	/* keep one read-only mapping alive past skeleton destruction */
	tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
	if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
		goto cleanup;

	test_mmap__destroy(skel);
	skel = NULL;
	CHECK_FAIL(munmap(bss_mmaped, bss_sz));
	bss_mmaped = NULL;
	CHECK_FAIL(munmap(map_mmaped, map_sz));
	map_mmaped = NULL;

	/* map should be still held by active mmap */
	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
	if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}
	close(tmp_fd);

	/* this should release data map finally */
	munmap(tmp1, map_sz);

	/* we need to wait for RCU grace period */
	for (i = 0; i < 10000; i++) {
		__u32 id = data_map_id - 1;
		/* poll map IDs until data_map_id is no longer reachable */
		if (bpf_map_get_next_id(id, &id) || id > data_map_id)
			break;
		usleep(1);
	}

	/* should fail to get map FD by non-existing ID */
	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
	if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
		  "unexpectedly succeeded %d\n", tmp_fd)) {
		close(tmp_fd);
		goto cleanup;
	}

cleanup:
	if (bss_mmaped)
		CHECK_FAIL(munmap(bss_mmaped, bss_sz));
	if (map_mmaped)
		CHECK_FAIL(munmap(map_mmaped, map_sz));
	test_mmap__destroy(skel);
}