/* Source: tools/testing/selftests/bpf/prog_tests/mmap.c (Linux kernel BPF selftests, via git.ipfire.org mirror) */
1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
4 #include "test_mmap.skel.h"
/* Round sz up to the next multiple of the system page size. */
static size_t roundup_page(size_t sz)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	size_t rem = sz % page_size;

	return rem ? sz + (page_size - rem) : sz;
}
18 const size_t bss_sz
= roundup_page(sizeof(struct test_mmap__bss
));
19 const size_t map_sz
= roundup_page(sizeof(struct map_data
));
20 const int zero
= 0, one
= 1, two
= 2, far
= 1500;
21 const long page_size
= sysconf(_SC_PAGE_SIZE
);
22 int err
, duration
= 0, i
, data_map_fd
, data_map_id
, tmp_fd
;
23 struct bpf_map
*data_map
, *bss_map
;
24 void *bss_mmaped
= NULL
, *map_mmaped
= NULL
, *tmp1
, *tmp2
;
25 struct test_mmap__bss
*bss_data
;
26 struct bpf_map_info map_info
;
27 __u32 map_info_sz
= sizeof(map_info
);
28 struct map_data
*map_data
;
29 struct test_mmap
*skel
;
32 skel
= test_mmap__open_and_load();
33 if (CHECK(!skel
, "skel_open_and_load", "skeleton open/load failed\n"))
36 bss_map
= skel
->maps
.bss
;
37 data_map
= skel
->maps
.data_map
;
38 data_map_fd
= bpf_map__fd(data_map
);
41 memset(&map_info
, 0, map_info_sz
);
42 err
= bpf_obj_get_info_by_fd(data_map_fd
, &map_info
, &map_info_sz
);
43 if (CHECK(err
, "map_get_info", "failed %d\n", errno
))
45 data_map_id
= map_info
.id
;
48 bss_mmaped
= mmap(NULL
, bss_sz
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
49 bpf_map__fd(bss_map
), 0);
50 if (CHECK(bss_mmaped
== MAP_FAILED
, "bss_mmap",
51 ".bss mmap failed: %d\n", errno
)) {
55 /* map as R/W first */
56 map_mmaped
= mmap(NULL
, map_sz
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
58 if (CHECK(map_mmaped
== MAP_FAILED
, "data_mmap",
59 "data_map mmap failed: %d\n", errno
)) {
64 bss_data
= bss_mmaped
;
65 map_data
= map_mmaped
;
67 CHECK_FAIL(bss_data
->in_val
);
68 CHECK_FAIL(bss_data
->out_val
);
69 CHECK_FAIL(skel
->bss
->in_val
);
70 CHECK_FAIL(skel
->bss
->out_val
);
71 CHECK_FAIL(map_data
->val
[0]);
72 CHECK_FAIL(map_data
->val
[1]);
73 CHECK_FAIL(map_data
->val
[2]);
74 CHECK_FAIL(map_data
->val
[far
]);
76 err
= test_mmap__attach(skel
);
77 if (CHECK(err
, "attach_raw_tp", "err %d\n", err
))
80 bss_data
->in_val
= 123;
82 CHECK_FAIL(bpf_map_update_elem(data_map_fd
, &zero
, &val
, 0));
86 CHECK_FAIL(bss_data
->in_val
!= 123);
87 CHECK_FAIL(bss_data
->out_val
!= 123);
88 CHECK_FAIL(skel
->bss
->in_val
!= 123);
89 CHECK_FAIL(skel
->bss
->out_val
!= 123);
90 CHECK_FAIL(map_data
->val
[0] != 111);
91 CHECK_FAIL(map_data
->val
[1] != 222);
92 CHECK_FAIL(map_data
->val
[2] != 123);
93 CHECK_FAIL(map_data
->val
[far
] != 3 * 123);
95 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd
, &zero
, &val
));
96 CHECK_FAIL(val
!= 111);
97 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd
, &one
, &val
));
98 CHECK_FAIL(val
!= 222);
99 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd
, &two
, &val
));
100 CHECK_FAIL(val
!= 123);
101 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd
, &far
, &val
));
102 CHECK_FAIL(val
!= 3 * 123);
104 /* data_map freeze should fail due to R/W mmap() */
105 err
= bpf_map_freeze(data_map_fd
);
106 if (CHECK(!err
|| errno
!= EBUSY
, "no_freeze",
107 "data_map freeze succeeded: err=%d, errno=%d\n", err
, errno
))
110 err
= mprotect(map_mmaped
, map_sz
, PROT_READ
);
111 if (CHECK(err
, "mprotect_ro", "mprotect to r/o failed %d\n", errno
))
114 /* unmap R/W mapping */
115 err
= munmap(map_mmaped
, map_sz
);
117 if (CHECK(err
, "data_map_munmap", "data_map munmap failed: %d\n", errno
))
120 /* re-map as R/O now */
121 map_mmaped
= mmap(NULL
, map_sz
, PROT_READ
, MAP_SHARED
, data_map_fd
, 0);
122 if (CHECK(map_mmaped
== MAP_FAILED
, "data_mmap",
123 "data_map R/O mmap failed: %d\n", errno
)) {
127 err
= mprotect(map_mmaped
, map_sz
, PROT_WRITE
);
128 if (CHECK(!err
, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
130 err
= mprotect(map_mmaped
, map_sz
, PROT_EXEC
);
131 if (CHECK(!err
, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
133 map_data
= map_mmaped
;
135 /* map/unmap in a loop to test ref counting */
136 for (i
= 0; i
< 10; i
++) {
137 int flags
= i
% 2 ? PROT_READ
: PROT_WRITE
;
140 p
= mmap(NULL
, map_sz
, flags
, MAP_SHARED
, data_map_fd
, 0);
141 if (CHECK_FAIL(p
== MAP_FAILED
))
143 err
= munmap(p
, map_sz
);
148 /* data_map freeze should now succeed due to no R/W mapping */
149 err
= bpf_map_freeze(data_map_fd
);
150 if (CHECK(err
, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
154 /* mapping as R/W now should fail */
155 tmp1
= mmap(NULL
, map_sz
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
157 if (CHECK(tmp1
!= MAP_FAILED
, "data_mmap", "mmap succeeded\n")) {
158 munmap(tmp1
, map_sz
);
162 bss_data
->in_val
= 321;
164 CHECK_FAIL(bss_data
->in_val
!= 321);
165 CHECK_FAIL(bss_data
->out_val
!= 321);
166 CHECK_FAIL(skel
->bss
->in_val
!= 321);
167 CHECK_FAIL(skel
->bss
->out_val
!= 321);
168 CHECK_FAIL(map_data
->val
[0] != 111);
169 CHECK_FAIL(map_data
->val
[1] != 222);
170 CHECK_FAIL(map_data
->val
[2] != 321);
171 CHECK_FAIL(map_data
->val
[far
] != 3 * 321);
173 /* check some more advanced mmap() manipulations */
175 /* map all but last page: pages 1-3 mapped */
176 tmp1
= mmap(NULL
, 3 * page_size
, PROT_READ
, MAP_SHARED
,
178 if (CHECK(tmp1
== MAP_FAILED
, "adv_mmap1", "errno %d\n", errno
))
181 /* unmap second page: pages 1, 3 mapped */
182 err
= munmap(tmp1
+ page_size
, page_size
);
183 if (CHECK(err
, "adv_mmap2", "errno %d\n", errno
)) {
184 munmap(tmp1
, map_sz
);
188 /* map page 2 back */
189 tmp2
= mmap(tmp1
+ page_size
, page_size
, PROT_READ
,
190 MAP_SHARED
| MAP_FIXED
, data_map_fd
, 0);
191 if (CHECK(tmp2
== MAP_FAILED
, "adv_mmap3", "errno %d\n", errno
)) {
192 munmap(tmp1
, page_size
);
193 munmap(tmp1
+ 2*page_size
, page_size
);
196 CHECK(tmp1
+ page_size
!= tmp2
, "adv_mmap4",
197 "tmp1: %p, tmp2: %p\n", tmp1
, tmp2
);
199 /* re-map all 4 pages */
200 tmp2
= mmap(tmp1
, 4 * page_size
, PROT_READ
, MAP_SHARED
| MAP_FIXED
,
202 if (CHECK(tmp2
== MAP_FAILED
, "adv_mmap5", "errno %d\n", errno
)) {
203 munmap(tmp1
, 3 * page_size
); /* unmap page 1 */
206 CHECK(tmp1
!= tmp2
, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1
, tmp2
);
209 CHECK_FAIL(bss_data
->in_val
!= 321);
210 CHECK_FAIL(bss_data
->out_val
!= 321);
211 CHECK_FAIL(skel
->bss
->in_val
!= 321);
212 CHECK_FAIL(skel
->bss
->out_val
!= 321);
213 CHECK_FAIL(map_data
->val
[0] != 111);
214 CHECK_FAIL(map_data
->val
[1] != 222);
215 CHECK_FAIL(map_data
->val
[2] != 321);
216 CHECK_FAIL(map_data
->val
[far
] != 3 * 321);
218 munmap(tmp2
, 4 * page_size
);
220 tmp1
= mmap(NULL
, map_sz
, PROT_READ
, MAP_SHARED
, data_map_fd
, 0);
221 if (CHECK(tmp1
== MAP_FAILED
, "last_mmap", "failed %d\n", errno
))
224 test_mmap__destroy(skel
);
226 CHECK_FAIL(munmap(bss_mmaped
, bss_sz
));
228 CHECK_FAIL(munmap(map_mmaped
, map_sz
));
231 /* map should be still held by active mmap */
232 tmp_fd
= bpf_map_get_fd_by_id(data_map_id
);
233 if (CHECK(tmp_fd
< 0, "get_map_by_id", "failed %d\n", errno
)) {
234 munmap(tmp1
, map_sz
);
239 /* this should release data map finally */
240 munmap(tmp1
, map_sz
);
242 /* we need to wait for RCU grace period */
243 for (i
= 0; i
< 10000; i
++) {
244 __u32 id
= data_map_id
- 1;
245 if (bpf_map_get_next_id(id
, &id
) || id
> data_map_id
)
250 /* should fail to get map FD by non-existing ID */
251 tmp_fd
= bpf_map_get_fd_by_id(data_map_id
);
252 if (CHECK(tmp_fd
>= 0, "get_map_by_id_after",
253 "unexpectedly succeeded %d\n", tmp_fd
)) {
260 CHECK_FAIL(munmap(bss_mmaped
, bss_sz
));
262 CHECK_FAIL(munmap(map_mmaped
, map_sz
));
263 test_mmap__destroy(skel
);