/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"
/*
 * Hack to make assertions more readable: every selftest op travels over the
 * single IOMMU_TEST_CMD ioctl number, with the op encoded in the payload
 * (.op field), so _IOMMU_TEST_CMD(x) just names the op at the call site.
 */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD
/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

/* Non-atomically set bit @nr in the bitmap at @addr. */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

/* Return true iff bit @nr is set in the bitmap at @addr. */
static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}
/*
 * Shared test-wide sizes. They are assigned at runtime by setup code
 * elsewhere in the test suite (not visible in this header) —
 * NOTE(review): confirm the initializing caller before relying on values.
 */
static unsigned long BUFFER_SIZE;

static unsigned long PAGE_SIZE;
/* Size of a single struct member, without needing an object instance. */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

/* Byte offset of the first byte past MEMBER within TYPE. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
66 static int _test_cmd_mock_domain(int fd
, unsigned int ioas_id
, __u32
*stdev_id
,
67 __u32
*hwpt_id
, __u32
*idev_id
)
69 struct iommu_test_cmd cmd
= {
71 .op
= IOMMU_TEST_OP_MOCK_DOMAIN
,
77 ret
= ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
81 *stdev_id
= cmd
.mock_domain
.out_stdev_id
;
84 *hwpt_id
= cmd
.mock_domain
.out_hwpt_id
;
86 *idev_id
= cmd
.mock_domain
.out_idev_id
;
89 #define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id) \
90 ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
92 #define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \
93 EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
94 stdev_id, hwpt_id, NULL))
96 static int _test_cmd_mock_domain_flags(int fd
, unsigned int ioas_id
,
97 __u32 stdev_flags
, __u32
*stdev_id
,
98 __u32
*hwpt_id
, __u32
*idev_id
)
100 struct iommu_test_cmd cmd
= {
102 .op
= IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS
,
104 .mock_domain_flags
= { .dev_flags
= stdev_flags
},
108 ret
= ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
112 *stdev_id
= cmd
.mock_domain_flags
.out_stdev_id
;
115 *hwpt_id
= cmd
.mock_domain_flags
.out_hwpt_id
;
117 *idev_id
= cmd
.mock_domain_flags
.out_idev_id
;
120 #define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
121 ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \
122 stdev_id, hwpt_id, idev_id))
123 #define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
124 EXPECT_ERRNO(_errno, \
125 _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \
126 stdev_id, hwpt_id, NULL))
128 static int _test_cmd_mock_domain_replace(int fd
, __u32 stdev_id
, __u32 pt_id
,
131 struct iommu_test_cmd cmd
= {
133 .op
= IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE
,
135 .mock_domain_replace
= {
141 ret
= ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
145 *hwpt_id
= cmd
.mock_domain_replace
.pt_id
;
149 #define test_cmd_mock_domain_replace(stdev_id, pt_id) \
150 ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
152 #define test_err_mock_domain_replace(_errno, stdev_id, pt_id) \
153 EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
156 static int _test_cmd_hwpt_alloc(int fd
, __u32 device_id
, __u32 pt_id
,
157 __u32 flags
, __u32
*hwpt_id
, __u32 data_type
,
158 void *data
, size_t data_len
)
160 struct iommu_hwpt_alloc cmd
= {
165 .data_type
= data_type
,
166 .data_len
= data_len
,
167 .data_uptr
= (uint64_t)data
,
171 ret
= ioctl(fd
, IOMMU_HWPT_ALLOC
, &cmd
);
175 *hwpt_id
= cmd
.out_hwpt_id
;
179 #define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \
180 ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
181 hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
183 #define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
184 EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
185 self->fd, device_id, pt_id, flags, \
186 hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))
188 #define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \
189 data_type, data, data_len) \
190 ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
191 hwpt_id, data_type, data, data_len))
192 #define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
193 data_type, data, data_len) \
194 EXPECT_ERRNO(_errno, \
195 _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
196 hwpt_id, data_type, data, data_len))
198 static int _test_cmd_access_replace_ioas(int fd
, __u32 access_id
,
199 unsigned int ioas_id
)
201 struct iommu_test_cmd cmd
= {
203 .op
= IOMMU_TEST_OP_ACCESS_REPLACE_IOAS
,
205 .access_replace_ioas
= { .ioas_id
= ioas_id
},
209 ret
= ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
214 #define test_cmd_access_replace_ioas(access_id, ioas_id) \
215 ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
217 static int _test_cmd_set_dirty_tracking(int fd
, __u32 hwpt_id
, bool enabled
)
219 struct iommu_hwpt_set_dirty_tracking cmd
= {
221 .flags
= enabled
? IOMMU_HWPT_DIRTY_TRACKING_ENABLE
: 0,
226 ret
= ioctl(fd
, IOMMU_HWPT_SET_DIRTY_TRACKING
, &cmd
);
231 #define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
232 ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
234 static int _test_cmd_get_dirty_bitmap(int fd
, __u32 hwpt_id
, size_t length
,
235 __u64 iova
, size_t page_size
,
236 __u64
*bitmap
, __u32 flags
)
238 struct iommu_hwpt_get_dirty_bitmap cmd
= {
244 .page_size
= page_size
,
245 .data
= (uintptr_t)bitmap
,
249 ret
= ioctl(fd
, IOMMU_HWPT_GET_DIRTY_BITMAP
, &cmd
);
255 #define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \
257 ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
258 page_size, bitmap, flags))
260 static int _test_cmd_mock_domain_set_dirty(int fd
, __u32 hwpt_id
, size_t length
,
261 __u64 iova
, size_t page_size
,
262 __u64
*bitmap
, __u64
*dirty
)
264 struct iommu_test_cmd cmd
= {
266 .op
= IOMMU_TEST_OP_DIRTY
,
271 .page_size
= page_size
,
272 .uptr
= (uintptr_t)bitmap
,
277 ret
= ioctl(fd
, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY
), &cmd
);
281 *dirty
= cmd
.dirty
.out_nr_dirty
;
285 #define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
288 _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
289 page_size, bitmap, nr))
291 static int _test_mock_dirty_bitmaps(int fd
, __u32 hwpt_id
, size_t length
,
292 __u64 iova
, size_t page_size
, __u64
*bitmap
,
293 __u64 bitmap_size
, __u32 flags
,
294 struct __test_metadata
*_metadata
)
296 unsigned long i
, count
, nbits
= bitmap_size
* BITS_PER_BYTE
;
297 unsigned long nr
= nbits
/ 2;
300 /* Mark all even bits as dirty in the mock domain */
301 for (count
= 0, i
= 0; i
< nbits
; count
+= !(i
% 2), i
++)
303 set_bit(i
, (unsigned long *)bitmap
);
304 ASSERT_EQ(nr
, count
);
306 test_cmd_mock_domain_set_dirty(fd
, hwpt_id
, length
, iova
, page_size
,
308 ASSERT_EQ(nr
, out_dirty
);
310 /* Expect all even bits as dirty in the user bitmap */
311 memset(bitmap
, 0, bitmap_size
);
312 test_cmd_get_dirty_bitmap(fd
, hwpt_id
, length
, iova
, page_size
, bitmap
,
314 for (count
= 0, i
= 0; i
< nbits
; count
+= !(i
% 2), i
++)
315 ASSERT_EQ(!(i
% 2), test_bit(i
, (unsigned long *)bitmap
));
316 ASSERT_EQ(count
, out_dirty
);
318 memset(bitmap
, 0, bitmap_size
);
319 test_cmd_get_dirty_bitmap(fd
, hwpt_id
, length
, iova
, page_size
, bitmap
,
322 /* It as read already -- expect all zeroes */
323 for (i
= 0; i
< nbits
; i
++) {
324 ASSERT_EQ(!(i
% 2) && (flags
&
325 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR
),
326 test_bit(i
, (unsigned long *)bitmap
));
331 #define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \
332 bitmap_size, flags, _metadata) \
333 ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
334 page_size, bitmap, bitmap_size, \
337 static int _test_cmd_create_access(int fd
, unsigned int ioas_id
,
338 __u32
*access_id
, unsigned int flags
)
340 struct iommu_test_cmd cmd
= {
342 .op
= IOMMU_TEST_OP_CREATE_ACCESS
,
344 .create_access
= { .flags
= flags
},
348 ret
= ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
351 *access_id
= cmd
.create_access
.out_access_fd
;
354 #define test_cmd_create_access(ioas_id, access_id, flags) \
355 ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
/* An access object is just an fd; destroying it is closing that fd. */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
365 static int _test_cmd_destroy_access_pages(int fd
, unsigned int access_id
,
366 unsigned int access_pages_id
)
368 struct iommu_test_cmd cmd
= {
370 .op
= IOMMU_TEST_OP_DESTROY_ACCESS_PAGES
,
372 .destroy_access_pages
= { .access_pages_id
= access_pages_id
},
374 return ioctl(fd
, IOMMU_TEST_CMD
, &cmd
);
376 #define test_cmd_destroy_access_pages(access_id, access_pages_id) \
377 ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
379 #define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
380 EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages( \
381 self->fd, access_id, access_pages_id))
383 static int _test_ioctl_destroy(int fd
, unsigned int id
)
385 struct iommu_destroy cmd
= {
389 return ioctl(fd
, IOMMU_DESTROY
, &cmd
);
391 #define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
393 static int _test_ioctl_ioas_alloc(int fd
, __u32
*id
)
395 struct iommu_ioas_alloc cmd
= {
400 ret
= ioctl(fd
, IOMMU_IOAS_ALLOC
, &cmd
);
403 *id
= cmd
.out_ioas_id
;
406 #define test_ioctl_ioas_alloc(id) \
408 ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
409 ASSERT_NE(0, *(id)); \
412 static int _test_ioctl_ioas_map(int fd
, unsigned int ioas_id
, void *buffer
,
413 size_t length
, __u64
*iova
, unsigned int flags
)
415 struct iommu_ioas_map cmd
= {
419 .user_va
= (uintptr_t)buffer
,
424 if (flags
& IOMMU_IOAS_MAP_FIXED_IOVA
)
427 ret
= ioctl(fd
, IOMMU_IOAS_MAP
, &cmd
);
431 #define test_ioctl_ioas_map(buffer, length, iova_p) \
432 ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
434 IOMMU_IOAS_MAP_WRITEABLE | \
435 IOMMU_IOAS_MAP_READABLE))
437 #define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p) \
438 EXPECT_ERRNO(_errno, \
439 _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
441 IOMMU_IOAS_MAP_WRITEABLE | \
442 IOMMU_IOAS_MAP_READABLE))
444 #define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p) \
445 ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
447 IOMMU_IOAS_MAP_WRITEABLE | \
448 IOMMU_IOAS_MAP_READABLE))
450 #define test_ioctl_ioas_map_fixed(buffer, length, iova) \
452 __u64 __iova = iova; \
453 ASSERT_EQ(0, _test_ioctl_ioas_map( \
454 self->fd, self->ioas_id, buffer, length, \
456 IOMMU_IOAS_MAP_FIXED_IOVA | \
457 IOMMU_IOAS_MAP_WRITEABLE | \
458 IOMMU_IOAS_MAP_READABLE)); \
461 #define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \
463 __u64 __iova = iova; \
465 _test_ioctl_ioas_map( \
466 self->fd, ioas_id, buffer, length, &__iova, \
467 IOMMU_IOAS_MAP_FIXED_IOVA | \
468 IOMMU_IOAS_MAP_WRITEABLE | \
469 IOMMU_IOAS_MAP_READABLE)); \
472 #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \
474 __u64 __iova = iova; \
475 EXPECT_ERRNO(_errno, \
476 _test_ioctl_ioas_map( \
477 self->fd, self->ioas_id, buffer, length, \
479 IOMMU_IOAS_MAP_FIXED_IOVA | \
480 IOMMU_IOAS_MAP_WRITEABLE | \
481 IOMMU_IOAS_MAP_READABLE)); \
484 static int _test_ioctl_ioas_unmap(int fd
, unsigned int ioas_id
, uint64_t iova
,
485 size_t length
, uint64_t *out_len
)
487 struct iommu_ioas_unmap cmd
= {
495 ret
= ioctl(fd
, IOMMU_IOAS_UNMAP
, &cmd
);
497 *out_len
= cmd
.length
;
500 #define test_ioctl_ioas_unmap(iova, length) \
501 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
504 #define test_ioctl_ioas_unmap_id(ioas_id, iova, length) \
505 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
508 #define test_err_ioctl_ioas_unmap(_errno, iova, length) \
509 EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
512 static int _test_ioctl_set_temp_memory_limit(int fd
, unsigned int limit
)
514 struct iommu_test_cmd memlimit_cmd
= {
515 .size
= sizeof(memlimit_cmd
),
516 .op
= IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT
,
517 .memory_limit
= { .limit
= limit
},
520 return ioctl(fd
, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT
),
524 #define test_ioctl_set_temp_memory_limit(limit) \
525 ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))
527 #define test_ioctl_set_default_memory_limit() \
528 test_ioctl_set_temp_memory_limit(65536)
530 static void teardown_iommufd(int fd
, struct __test_metadata
*_metadata
)
532 struct iommu_test_cmd test_cmd
= {
533 .size
= sizeof(test_cmd
),
534 .op
= IOMMU_TEST_OP_MD_CHECK_REFS
,
535 .check_refs
= { .length
= BUFFER_SIZE
,
536 .uptr
= (uintptr_t)buffer
},
542 EXPECT_EQ(0, close(fd
));
544 fd
= open("/dev/iommu", O_RDWR
);
546 EXPECT_EQ(0, ioctl(fd
, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS
),
548 EXPECT_EQ(0, close(fd
));
/* Assert that @cmd fails with -1 and that errno matches @expected_errno. */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
559 /* @data can be NULL */
560 static int _test_cmd_get_hw_info(int fd
, __u32 device_id
, void *data
,
561 size_t data_len
, uint32_t *capabilities
)
563 struct iommu_test_hw_info
*info
= (struct iommu_test_hw_info
*)data
;
564 struct iommu_hw_info cmd
= {
567 .data_len
= data_len
,
568 .data_uptr
= (uint64_t)data
,
569 .out_capabilities
= 0,
573 ret
= ioctl(fd
, IOMMU_GET_HW_INFO
, &cmd
);
577 assert(cmd
.out_data_type
== IOMMU_HW_INFO_TYPE_SELFTEST
);
580 * The struct iommu_test_hw_info should be the one defined
581 * by the current kernel.
583 assert(cmd
.data_len
== sizeof(struct iommu_test_hw_info
));
586 * Trailing bytes should be 0 if user buffer is larger than
587 * the data that kernel reports.
589 if (data_len
> cmd
.data_len
) {
590 char *ptr
= (char *)(data
+ cmd
.data_len
);
593 while (idx
< data_len
- cmd
.data_len
) {
594 assert(!*(ptr
+ idx
));
600 if (data_len
>= offsetofend(struct iommu_test_hw_info
, test_reg
))
601 assert(info
->test_reg
== IOMMU_HW_INFO_SELFTEST_REGVAL
);
602 if (data_len
>= offsetofend(struct iommu_test_hw_info
, flags
))
603 assert(!info
->flags
);
607 *capabilities
= cmd
.out_capabilities
;
612 #define test_cmd_get_hw_info(device_id, data, data_len) \
613 ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
616 #define test_err_get_hw_info(_errno, device_id, data, data_len) \
617 EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
620 #define test_cmd_get_hw_capabilities(device_id, caps, mask) \
621 ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))