]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - tools/testing/selftests/iommu/iommufd_utils.h
KVM: Harden copying of userspace-array against overflow
[thirdparty/kernel/stable.git] / tools / testing / selftests / iommu / iommufd_utils.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12
13 #include "../kselftest_harness.h"
14 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
15
/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
/* Mask selecting bit @nr within its containing word */
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
/* Index of the unsigned long word containing bit @nr */
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)
24
/* Set bit @nr in the bitmap at @addr (non-atomic) */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}
32
33 static inline bool test_bit(unsigned int nr, unsigned long *addr)
34 {
35 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
36 }
37
/* Shared test buffer; allocated/sized by the test program elsewhere -- TODO confirm */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* Host page size; presumably filled in at startup (e.g. sysconf) -- verify against caller */
static unsigned long PAGE_SIZE;

/* Size of a struct member, without needing an instance */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/* Offset of the first byte AFTER the member -- used for versioned-struct probing */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
65
66 static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
67 __u32 *hwpt_id, __u32 *idev_id)
68 {
69 struct iommu_test_cmd cmd = {
70 .size = sizeof(cmd),
71 .op = IOMMU_TEST_OP_MOCK_DOMAIN,
72 .id = ioas_id,
73 .mock_domain = {},
74 };
75 int ret;
76
77 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
78 if (ret)
79 return ret;
80 if (stdev_id)
81 *stdev_id = cmd.mock_domain.out_stdev_id;
82 assert(cmd.id != 0);
83 if (hwpt_id)
84 *hwpt_id = cmd.mock_domain.out_hwpt_id;
85 if (idev_id)
86 *idev_id = cmd.mock_domain.out_idev_id;
87 return 0;
88 }
/* Create a mock domain and assert success */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
/* Expect mock domain creation to fail with @_errno */
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
95
96 static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
97 __u32 stdev_flags, __u32 *stdev_id,
98 __u32 *hwpt_id, __u32 *idev_id)
99 {
100 struct iommu_test_cmd cmd = {
101 .size = sizeof(cmd),
102 .op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
103 .id = ioas_id,
104 .mock_domain_flags = { .dev_flags = stdev_flags },
105 };
106 int ret;
107
108 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
109 if (ret)
110 return ret;
111 if (stdev_id)
112 *stdev_id = cmd.mock_domain_flags.out_stdev_id;
113 assert(cmd.id != 0);
114 if (hwpt_id)
115 *hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
116 if (idev_id)
117 *idev_id = cmd.mock_domain_flags.out_idev_id;
118 return 0;
119 }
/* Create a mock domain with extra device flags and assert success */
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
/* Expect flagged mock domain creation to fail with @_errno */
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
127
128 static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
129 __u32 *hwpt_id)
130 {
131 struct iommu_test_cmd cmd = {
132 .size = sizeof(cmd),
133 .op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
134 .id = stdev_id,
135 .mock_domain_replace = {
136 .pt_id = pt_id,
137 },
138 };
139 int ret;
140
141 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
142 if (ret)
143 return ret;
144 if (hwpt_id)
145 *hwpt_id = cmd.mock_domain_replace.pt_id;
146 return 0;
147 }
148
/* Replace the domain attached to @stdev_id and assert success */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
/* Expect the replace to fail with @_errno */
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
155
156 static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
157 __u32 flags, __u32 *hwpt_id, __u32 data_type,
158 void *data, size_t data_len)
159 {
160 struct iommu_hwpt_alloc cmd = {
161 .size = sizeof(cmd),
162 .flags = flags,
163 .dev_id = device_id,
164 .pt_id = pt_id,
165 .data_type = data_type,
166 .data_len = data_len,
167 .data_uptr = (uint64_t)data,
168 };
169 int ret;
170
171 ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
172 if (ret)
173 return ret;
174 if (hwpt_id)
175 *hwpt_id = cmd.out_hwpt_id;
176 return 0;
177 }
178
/* Allocate a plain (no vendor data) HWPT and assert success */
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
/* Expect plain HWPT allocation to fail with @_errno */
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

/* Allocate a nested HWPT carrying vendor data and assert success */
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
					  hwpt_id, data_type, data, data_len))
/* Expect nested HWPT allocation to fail with @_errno */
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
					  hwpt_id, data_type, data, data_len))
197
198 static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
199 unsigned int ioas_id)
200 {
201 struct iommu_test_cmd cmd = {
202 .size = sizeof(cmd),
203 .op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
204 .id = access_id,
205 .access_replace_ioas = { .ioas_id = ioas_id },
206 };
207 int ret;
208
209 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
210 if (ret)
211 return ret;
212 return 0;
213 }
/* Replace the IOAS backing an access object and assert success */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
216
217 static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
218 {
219 struct iommu_hwpt_set_dirty_tracking cmd = {
220 .size = sizeof(cmd),
221 .flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
222 .hwpt_id = hwpt_id,
223 };
224 int ret;
225
226 ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
227 if (ret)
228 return -errno;
229 return 0;
230 }
/* Enable/disable dirty tracking and assert success */
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
233
234 static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
235 __u64 iova, size_t page_size,
236 __u64 *bitmap, __u32 flags)
237 {
238 struct iommu_hwpt_get_dirty_bitmap cmd = {
239 .size = sizeof(cmd),
240 .hwpt_id = hwpt_id,
241 .flags = flags,
242 .iova = iova,
243 .length = length,
244 .page_size = page_size,
245 .data = (uintptr_t)bitmap,
246 };
247 int ret;
248
249 ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
250 if (ret)
251 return ret;
252 return 0;
253 }
254
/* Read the dirty bitmap and assert success (takes an explicit fd) */
#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
259
260 static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
261 __u64 iova, size_t page_size,
262 __u64 *bitmap, __u64 *dirty)
263 {
264 struct iommu_test_cmd cmd = {
265 .size = sizeof(cmd),
266 .op = IOMMU_TEST_OP_DIRTY,
267 .id = hwpt_id,
268 .dirty = {
269 .iova = iova,
270 .length = length,
271 .page_size = page_size,
272 .uptr = (uintptr_t)bitmap,
273 }
274 };
275 int ret;
276
277 ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
278 if (ret)
279 return -ret;
280 if (dirty)
281 *dirty = cmd.dirty.out_nr_dirty;
282 return 0;
283 }
284
/* Seed dirty bits in the mock domain and assert success */
#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
290
/*
 * End-to-end dirty tracking check: mark every even page dirty in the mock
 * domain, read the bitmap back and verify exactly those bits are set, then
 * read again to verify the kernel cleared them on the first read (unless
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR was passed in @flags).
 * @bitmap_size is in bytes; @bitmap must be large enough for one bit per
 * page of @length.
 */
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size, __u64 *bitmap,
				    __u64 bitmap_size, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE;
	unsigned long nr = nbits / 2;
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
		if (!(i % 2))
			set_bit(i, (unsigned long *)bitmap);
	ASSERT_EQ(nr, count);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap));
	ASSERT_EQ(count, out_dirty);

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes unless NO_CLEAR kept them */
	for (i = 0; i < nbits; i++) {
		ASSERT_EQ(!(i % 2) && (flags &
			  IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
			  test_bit(i, (unsigned long *)bitmap));
	}

	return 0;
}
/* Full set-then-read dirty bitmap round trip; asserts success */
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap,      \
				bitmap_size, flags, _metadata)                 \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, bitmap, bitmap_size,  \
					      flags, _metadata))
336
337 static int _test_cmd_create_access(int fd, unsigned int ioas_id,
338 __u32 *access_id, unsigned int flags)
339 {
340 struct iommu_test_cmd cmd = {
341 .size = sizeof(cmd),
342 .op = IOMMU_TEST_OP_CREATE_ACCESS,
343 .id = ioas_id,
344 .create_access = { .flags = flags },
345 };
346 int ret;
347
348 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
349 if (ret)
350 return ret;
351 *access_id = cmd.create_access.out_access_fd;
352 return 0;
353 }
/* Create an access object and assert success */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
357
/* An access object is represented by an fd, so destroying it is a close() */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
/* Destroy an access object and assert success */
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
364
365 static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
366 unsigned int access_pages_id)
367 {
368 struct iommu_test_cmd cmd = {
369 .size = sizeof(cmd),
370 .op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
371 .id = access_id,
372 .destroy_access_pages = { .access_pages_id = access_pages_id },
373 };
374 return ioctl(fd, IOMMU_TEST_CMD, &cmd);
375 }
/* Destroy pinned pages and assert success */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
/* Expect the destroy to fail with @_errno */
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
382
/* Destroy any iommufd object (ioas, hwpt, stdev, ...) by its @id */
static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
/* Destroy an object and assert success */
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
392
393 static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
394 {
395 struct iommu_ioas_alloc cmd = {
396 .size = sizeof(cmd),
397 };
398 int ret;
399
400 ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
401 if (ret)
402 return ret;
403 *id = cmd.out_ioas_id;
404 return 0;
405 }
/* Allocate an IOAS, asserting success and a non-zero returned ID */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
411
412 static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
413 size_t length, __u64 *iova, unsigned int flags)
414 {
415 struct iommu_ioas_map cmd = {
416 .size = sizeof(cmd),
417 .flags = flags,
418 .ioas_id = ioas_id,
419 .user_va = (uintptr_t)buffer,
420 .length = length,
421 };
422 int ret;
423
424 if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
425 cmd.iova = *iova;
426
427 ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
428 *iova = cmd.iova;
429 return ret;
430 }
/* Map into the fixture's IOAS (kernel-chosen iova) and assert success */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
					  IOMMU_IOAS_MAP_READABLE))

/* Expect a map into the fixture's IOAS to fail with @_errno */
#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
					  IOMMU_IOAS_MAP_READABLE))

/* Map into an explicit IOAS and assert success */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
					  IOMMU_IOAS_MAP_READABLE))

/* Map at a fixed iova into the fixture's IOAS and assert success */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

/* Map at a fixed iova into an explicit IOAS and assert success */
#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

/* Expect a fixed-iova map to fail with @_errno */
#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
483
484 static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
485 size_t length, uint64_t *out_len)
486 {
487 struct iommu_ioas_unmap cmd = {
488 .size = sizeof(cmd),
489 .ioas_id = ioas_id,
490 .iova = iova,
491 .length = length,
492 };
493 int ret;
494
495 ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
496 if (out_len)
497 *out_len = cmd.length;
498 return ret;
499 }
/* Unmap from the fixture's IOAS and assert success */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* Unmap from an explicit IOAS and assert success */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

/* Expect the unmap to fail with @_errno */
#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
511
/* Cap the test driver's temporary allocation size (test-only knob) */
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

/* Set the memory limit and assert success */
#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* 65536 restores what the tests treat as the default limit -- TODO confirm */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
529
/*
 * Close @fd, then reopen /dev/iommu and ask the kernel to re-check the
 * refcounts on the global test buffer (expected refs defaults to 0 via
 * the zeroed .refs field) -- i.e. verify the closed fd released every
 * page reference it held.
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	/* Fixture was never set up */
	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	/* Re-check refs through a fresh fd so the old one is fully torn down */
	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
550
/*
 * Expect @cmd to fail returning -1 with the given errno.  The return
 * value check is an ASSERT so the test aborts if @cmd unexpectedly
 * succeeds.
 */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})

#endif
/*
 * NOTE(review): the definitions below sit outside the
 * __SELFTEST_IOMMUFD_UTILS include guard closed by the #endif above, so a
 * double inclusion would redefine them.  The #endif likely belongs at the
 * very end of the file -- confirm against the full source.
 */
559 /* @data can be NULL */
560 static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
561 size_t data_len, uint32_t *capabilities)
562 {
563 struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
564 struct iommu_hw_info cmd = {
565 .size = sizeof(cmd),
566 .dev_id = device_id,
567 .data_len = data_len,
568 .data_uptr = (uint64_t)data,
569 .out_capabilities = 0,
570 };
571 int ret;
572
573 ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
574 if (ret)
575 return ret;
576
577 assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);
578
579 /*
580 * The struct iommu_test_hw_info should be the one defined
581 * by the current kernel.
582 */
583 assert(cmd.data_len == sizeof(struct iommu_test_hw_info));
584
585 /*
586 * Trailing bytes should be 0 if user buffer is larger than
587 * the data that kernel reports.
588 */
589 if (data_len > cmd.data_len) {
590 char *ptr = (char *)(data + cmd.data_len);
591 int idx = 0;
592
593 while (idx < data_len - cmd.data_len) {
594 assert(!*(ptr + idx));
595 idx++;
596 }
597 }
598
599 if (info) {
600 if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
601 assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
602 if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
603 assert(!info->flags);
604 }
605
606 if (capabilities)
607 *capabilities = cmd.out_capabilities;
608
609 return 0;
610 }
611
/* Query hw info into @data and assert success */
#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

/* Expect the hw info query to fail with @_errno */
#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))

/* Fetch only the capability bits into @caps; @mask is unused by this macro */
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))