1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This module provides an interface to trigger and test firmware loading.
4 *
5 * It is designed to be used for basic evaluation of the firmware loading
6 * subsystem (for example when validating firmware verification). It lacks
7 * any extra dependencies, and will not normally be loaded by the system
8 * unless explicitly requested by name.
9 */
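/*
 * A quick usage sketch (paths assumed, not guaranteed by this module): the
 * misc device normally exposes its sysfs knobs under
 * /sys/devices/virtual/misc/test_firmware/, so a synchronous load can be
 * triggered with something like:
 *
 *	echo -n "test-firmware.bin" > /sys/devices/virtual/misc/test_firmware/trigger_request
 *
 * and the data of the last successfully loaded firmware can then be read
 * back from the misc device node, e.g. "cat /dev/test_firmware". The
 * tools/testing/selftests/firmware/ scripts exercise these knobs.
 */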
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/printk.h>
16 #include <linux/completion.h>
17 #include <linux/firmware.h>
18 #include <linux/device.h>
19 #include <linux/fs.h>
20 #include <linux/miscdevice.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/vmalloc.h>
26
27 #define TEST_FIRMWARE_NAME "test-firmware.bin"
28 #define TEST_FIRMWARE_NUM_REQS 4
29
30 static DEFINE_MUTEX(test_fw_mutex);
31 static const struct firmware *test_firmware;
32
33 struct test_batched_req {
34 u8 idx;
35 int rc;
36 bool sent;
37 const struct firmware *fw;
38 const char *name;
39 struct completion completion;
40 struct task_struct *task;
41 struct device *dev;
42 };
43
44 /**
45 * test_config - represents the configuration for the different test triggers
46 *
47 * @name: the name of the firmware file to look for
48 * @sync_direct: if true, request_firmware_direct() is used instead of
49 *	request_firmware() when the sync trigger is used.
50 * @send_uevent: whether or not to send a uevent for async requests
51 * @num_requests: number of requests to try per test case. This is trigger
52 * specific.
53 * @reqs: stores all requests information
54 * @read_fw_idx: index of the batched request whose firmware results are
55 *	read back through the read_firmware trigger.
56 * @test_result: a test may use this to collect the result from the call
57 * of the request_firmware*() calls used in their tests. In order of
58 *	priority we always keep any setup error first. If no setup errors were
59 *	found then we move on to the first error encountered while running the
60 *	API. Note that for async calls this typically will be a successful
61 *	result (0) unless of course you've used bogus parameters, or the system
62 *	is out of memory. In the async case the callback is expected to do a
63 *	bit more homework to figure out what happened; unfortunately the only
64 *	information passed on error today is the fact that no firmware was
65 *	found, so we can only assume -ENOENT on async calls if the firmware is
66 *	NULL.
67 *
68 * Errors you can expect:
69 *
70 * API specific:
71 *
72 * 0: success for sync, for async it means request was sent
73 * -EINVAL: invalid parameters or request
74 * -ENOENT: files not found
75 *
76 * System environment:
77 *
78 * -ENOMEM: memory pressure on system
79 *	-ENODEV: no more devices available to test
80 * -EINVAL: an unexpected error has occurred
81 * @req_firmware: if @sync_direct is true this is set to
82 * request_firmware_direct(), otherwise request_firmware()
83 */
84 struct test_config {
85 char *name;
86 bool sync_direct;
87 bool send_uevent;
88 u8 num_requests;
89 u8 read_fw_idx;
90
91 /*
92 * These below don't belong here, but we'll move them once we create
93 * a struct fw_test_device and stuff the misc_dev under there later.
94 */
95 struct test_batched_req *reqs;
96 int test_result;
97 int (*req_firmware)(const struct firmware **fw, const char *name,
98 struct device *device);
99 };
100
101 static struct test_config *test_fw_config;
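/*
 * A sketch of how the custom trigger configuration above is typically driven
 * from userspace (sysfs path assumed, as used by the firmware selftests):
 *
 *	cd /sys/devices/virtual/misc/test_firmware/
 *	echo 1 > reset				# restore the defaults
 *	echo -n "test-firmware.bin" > config_name
 *	echo 4 > config_num_requests
 *	echo 1 > config_sync_direct		# use request_firmware_direct()
 *	cat config				# verify the settings
 */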
102
103 static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
104 size_t size, loff_t *offset)
105 {
106 ssize_t rc = 0;
107
108 mutex_lock(&test_fw_mutex);
109 if (test_firmware)
110 rc = simple_read_from_buffer(buf, size, offset,
111 test_firmware->data,
112 test_firmware->size);
113 mutex_unlock(&test_fw_mutex);
114 return rc;
115 }
116
117 static const struct file_operations test_fw_fops = {
118 .owner = THIS_MODULE,
119 .read = test_fw_misc_read,
120 };
121
122 static void __test_release_all_firmware(void)
123 {
124 struct test_batched_req *req;
125 u8 i;
126
127 if (!test_fw_config->reqs)
128 return;
129
130 for (i = 0; i < test_fw_config->num_requests; i++) {
131 req = &test_fw_config->reqs[i];
132 if (req->fw)
133 release_firmware(req->fw);
134 }
135
136 vfree(test_fw_config->reqs);
137 test_fw_config->reqs = NULL;
138 }
139
140 static void test_release_all_firmware(void)
141 {
142 mutex_lock(&test_fw_mutex);
143 __test_release_all_firmware();
144 mutex_unlock(&test_fw_mutex);
145 }
146
147
148 static void __test_firmware_config_free(void)
149 {
150 __test_release_all_firmware();
151 kfree_const(test_fw_config->name);
152 test_fw_config->name = NULL;
153 }
154
155 /*
156 * XXX: move to kstrncpy() once merged.
157 *
158 * Users should use kfree_const() when freeing these.
159 */
160 static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
161 {
162 *dst = kstrndup(name, count, gfp);
163 if (!*dst)
164 return -ENOSPC;
165 return count;
166 }
167
168 static int __test_firmware_config_init(void)
169 {
170 int ret;
171
172 ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
173 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
174 if (ret < 0)
175 goto out;
176
177 test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
178 test_fw_config->send_uevent = true;
179 test_fw_config->sync_direct = false;
180 test_fw_config->req_firmware = request_firmware;
181 test_fw_config->test_result = 0;
182 test_fw_config->reqs = NULL;
183
184 return 0;
185
186 out:
187 __test_firmware_config_free();
188 return ret;
189 }
190
191 static ssize_t reset_store(struct device *dev,
192 struct device_attribute *attr,
193 const char *buf, size_t count)
194 {
195 int ret;
196
197 mutex_lock(&test_fw_mutex);
198
199 __test_firmware_config_free();
200
201 ret = __test_firmware_config_init();
202 if (ret < 0) {
203 ret = -ENOMEM;
204 pr_err("could not alloc settings for config trigger: %d\n",
205 ret);
206 goto out;
207 }
208
209 pr_info("reset\n");
210 ret = count;
211
212 out:
213 mutex_unlock(&test_fw_mutex);
214
215 return ret;
216 }
217 static DEVICE_ATTR_WO(reset);
218
219 static ssize_t config_show(struct device *dev,
220 struct device_attribute *attr,
221 char *buf)
222 {
223 int len = 0;
224
225 mutex_lock(&test_fw_mutex);
226
227 len += snprintf(buf, PAGE_SIZE,
228 "Custom trigger configuration for: %s\n",
229 dev_name(dev));
230
231 if (test_fw_config->name)
232 len += snprintf(buf + len, PAGE_SIZE - len,
233 "name:\t%s\n",
234 test_fw_config->name);
235 else
236 len += snprintf(buf + len, PAGE_SIZE - len,
237 "name:\tEMPTY\n");
238 
239 len += snprintf(buf + len, PAGE_SIZE - len,
240 "num_requests:\t%u\n", test_fw_config->num_requests);
241 
242 len += snprintf(buf + len, PAGE_SIZE - len,
243 "send_uevent:\t\t%s\n",
244 test_fw_config->send_uevent ?
245 "FW_ACTION_HOTPLUG" :
246 "FW_ACTION_NOHOTPLUG");
247 len += snprintf(buf + len, PAGE_SIZE - len,
248 "sync_direct:\t\t%s\n",
249 test_fw_config->sync_direct ? "true" : "false");
250 len += snprintf(buf + len, PAGE_SIZE - len,
251 "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
252
253 mutex_unlock(&test_fw_mutex);
254
255 return len;
256 }
257 static DEVICE_ATTR_RO(config);
258
259 static ssize_t config_name_store(struct device *dev,
260 struct device_attribute *attr,
261 const char *buf, size_t count)
262 {
263 int ret;
264
265 mutex_lock(&test_fw_mutex);
266 kfree_const(test_fw_config->name);
267 ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
268 mutex_unlock(&test_fw_mutex);
269
270 return ret;
271 }
272
273 /*
274 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
275 */
276 static ssize_t config_test_show_str(char *dst,
277 char *src)
278 {
279 int len;
280
281 mutex_lock(&test_fw_mutex);
282 len = snprintf(dst, PAGE_SIZE, "%s\n", src);
283 mutex_unlock(&test_fw_mutex);
284
285 return len;
286 }
287
288 static int test_dev_config_update_bool(const char *buf, size_t size,
289 bool *cfg)
290 {
291 int ret;
292
293 mutex_lock(&test_fw_mutex);
294 if (strtobool(buf, cfg) < 0)
295 ret = -EINVAL;
296 else
297 ret = size;
298 mutex_unlock(&test_fw_mutex);
299
300 return ret;
301 }
302
303 static ssize_t
304 test_dev_config_show_bool(char *buf,
305 bool config)
306 {
307 bool val;
308
309 mutex_lock(&test_fw_mutex);
310 val = config;
311 mutex_unlock(&test_fw_mutex);
312
313 return snprintf(buf, PAGE_SIZE, "%d\n", val);
314 }
315
316 static ssize_t test_dev_config_show_int(char *buf, int cfg)
317 {
318 int val;
319
320 mutex_lock(&test_fw_mutex);
321 val = cfg;
322 mutex_unlock(&test_fw_mutex);
323
324 return snprintf(buf, PAGE_SIZE, "%d\n", val);
325 }
326
327 static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
328 {
329 int ret;
330 long new;
331
332 ret = kstrtol(buf, 10, &new);
333 if (ret)
334 return ret;
335
336 if (new > U8_MAX)
337 return -EINVAL;
338
339 mutex_lock(&test_fw_mutex);
340 *(u8 *)cfg = new;
341 mutex_unlock(&test_fw_mutex);
342
343 /* Always return full write size even if we didn't consume all */
344 return size;
345 }
346
347 static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
348 {
349 u8 val;
350
351 mutex_lock(&test_fw_mutex);
352 val = cfg;
353 mutex_unlock(&test_fw_mutex);
354
355 return snprintf(buf, PAGE_SIZE, "%u\n", val);
356 }
357
358 static ssize_t config_name_show(struct device *dev,
359 struct device_attribute *attr,
360 char *buf)
361 {
362 return config_test_show_str(buf, test_fw_config->name);
363 }
364 static DEVICE_ATTR_RW(config_name);
365
366 static ssize_t config_num_requests_store(struct device *dev,
367 struct device_attribute *attr,
368 const char *buf, size_t count)
369 {
370 int rc;
371
372 mutex_lock(&test_fw_mutex);
373 if (test_fw_config->reqs) {
374 pr_err("Must call release_all_firmware prior to changing config\n");
375 rc = -EINVAL;
376 mutex_unlock(&test_fw_mutex);
377 goto out;
378 }
379 mutex_unlock(&test_fw_mutex);
380
381 rc = test_dev_config_update_u8(buf, count,
382 &test_fw_config->num_requests);
383
384 out:
385 return rc;
386 }
387
388 static ssize_t config_num_requests_show(struct device *dev,
389 struct device_attribute *attr,
390 char *buf)
391 {
392 return test_dev_config_show_u8(buf, test_fw_config->num_requests);
393 }
394 static DEVICE_ATTR_RW(config_num_requests);
395
396 static ssize_t config_sync_direct_store(struct device *dev,
397 struct device_attribute *attr,
398 const char *buf, size_t count)
399 {
400 int rc = test_dev_config_update_bool(buf, count,
401 &test_fw_config->sync_direct);
402
403 if (rc == count)
404 test_fw_config->req_firmware = test_fw_config->sync_direct ?
405 request_firmware_direct :
406 request_firmware;
407 return rc;
408 }
409
410 static ssize_t config_sync_direct_show(struct device *dev,
411 struct device_attribute *attr,
412 char *buf)
413 {
414 return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
415 }
416 static DEVICE_ATTR_RW(config_sync_direct);
417
418 static ssize_t config_send_uevent_store(struct device *dev,
419 struct device_attribute *attr,
420 const char *buf, size_t count)
421 {
422 return test_dev_config_update_bool(buf, count,
423 &test_fw_config->send_uevent);
424 }
425
426 static ssize_t config_send_uevent_show(struct device *dev,
427 struct device_attribute *attr,
428 char *buf)
429 {
430 return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
431 }
432 static DEVICE_ATTR_RW(config_send_uevent);
433
434 static ssize_t config_read_fw_idx_store(struct device *dev,
435 struct device_attribute *attr,
436 const char *buf, size_t count)
437 {
438 return test_dev_config_update_u8(buf, count,
439 &test_fw_config->read_fw_idx);
440 }
441
442 static ssize_t config_read_fw_idx_show(struct device *dev,
443 struct device_attribute *attr,
444 char *buf)
445 {
446 return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
447 }
448 static DEVICE_ATTR_RW(config_read_fw_idx);
449
450
451 static ssize_t trigger_request_store(struct device *dev,
452 struct device_attribute *attr,
453 const char *buf, size_t count)
454 {
455 int rc;
456 char *name;
457
458 name = kstrndup(buf, count, GFP_KERNEL);
459 if (!name)
460 return -ENOSPC;
461
462 pr_info("loading '%s'\n", name);
463
464 mutex_lock(&test_fw_mutex);
465 release_firmware(test_firmware);
466 test_firmware = NULL;
467 rc = request_firmware(&test_firmware, name, dev);
468 if (rc) {
469 pr_info("load of '%s' failed: %d\n", name, rc);
470 goto out;
471 }
472 pr_info("loaded: %zu\n", test_firmware->size);
473 rc = count;
474
475 out:
476 mutex_unlock(&test_fw_mutex);
477
478 kfree(name);
479
480 return rc;
481 }
482 static DEVICE_ATTR_WO(trigger_request);
483
484 static DECLARE_COMPLETION(async_fw_done);
485
486 static void trigger_async_request_cb(const struct firmware *fw, void *context)
487 {
488 test_firmware = fw;
489 complete(&async_fw_done);
490 }
491
492 static ssize_t trigger_async_request_store(struct device *dev,
493 struct device_attribute *attr,
494 const char *buf, size_t count)
495 {
496 int rc;
497 char *name;
498
499 name = kstrndup(buf, count, GFP_KERNEL);
500 if (!name)
501 return -ENOSPC;
502
503 pr_info("loading '%s'\n", name);
504
505 mutex_lock(&test_fw_mutex);
506 release_firmware(test_firmware);
507 test_firmware = NULL;
508 rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
509 NULL, trigger_async_request_cb);
510 if (rc) {
511 pr_info("async load of '%s' failed: %d\n", name, rc);
512 kfree(name);
513 goto out;
514 }
515 /* Free 'name' ASAP, to test for race conditions */
516 kfree(name);
517
518 wait_for_completion(&async_fw_done);
519
520 if (test_firmware) {
521 pr_info("loaded: %zu\n", test_firmware->size);
522 rc = count;
523 } else {
524 pr_err("failed to async load firmware\n");
525 rc = -ENODEV;
526 }
527
528 out:
529 mutex_unlock(&test_fw_mutex);
530
531 return rc;
532 }
533 static DEVICE_ATTR_WO(trigger_async_request);
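/*
 * The async trigger mirrors how a driver would call request_firmware_nowait()
 * and wait on a completion from its callback. From userspace the flow is
 * simply (sysfs path assumed):
 *
 *	echo -n "test-firmware.bin" > /sys/devices/virtual/misc/test_firmware/trigger_async_request
 *
 * The write only returns once the async callback has fired, so reading
 * /dev/test_firmware afterwards returns the freshly loaded image.
 */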
534
535 static ssize_t trigger_custom_fallback_store(struct device *dev,
536 struct device_attribute *attr,
537 const char *buf, size_t count)
538 {
539 int rc;
540 char *name;
541
542 name = kstrndup(buf, count, GFP_KERNEL);
543 if (!name)
544 return -ENOSPC;
545
546 pr_info("loading '%s' using custom fallback mechanism\n", name);
547
548 mutex_lock(&test_fw_mutex);
549 release_firmware(test_firmware);
550 test_firmware = NULL;
551 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
552 dev, GFP_KERNEL, NULL,
553 trigger_async_request_cb);
554 if (rc) {
555 pr_info("async load of '%s' failed: %d\n", name, rc);
556 kfree(name);
557 goto out;
558 }
559 /* Free 'name' ASAP, to test for race conditions */
560 kfree(name);
561
562 wait_for_completion(&async_fw_done);
563
564 if (test_firmware) {
565 pr_info("loaded: %zu\n", test_firmware->size);
566 rc = count;
567 } else {
568 pr_err("failed to async load firmware\n");
569 rc = -ENODEV;
570 }
571
572 out:
573 mutex_unlock(&test_fw_mutex);
574
575 return rc;
576 }
577 static DEVICE_ATTR_WO(trigger_custom_fallback);
578
579 static int test_fw_run_batch_request(void *data)
580 {
581 struct test_batched_req *req = data;
582
583 if (!req) {
584 test_fw_config->test_result = -EINVAL;
585 return -EINVAL;
586 }
587
588 req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
589 if (req->rc) {
590 pr_info("#%u: batched sync load failed: %d\n",
591 req->idx, req->rc);
592 if (!test_fw_config->test_result)
593 test_fw_config->test_result = req->rc;
594 } else if (req->fw) {
595 req->sent = true;
596 pr_info("#%u: batched sync loaded %zu\n",
597 req->idx, req->fw->size);
598 }
599 complete(&req->completion);
600
601 req->task = NULL;
602
603 return 0;
604 }
605
606 /*
607 * We use a kthread as otherwise the kernel serializes all our sync requests
608 * and we would not be able to mimic batched requests on a sync call. Batched
609 * requests on a sync call can, for instance, happen in a device driver when
610 * multiple cards are used and firmware loading happens outside of probe.
611 */
612 static ssize_t trigger_batched_requests_store(struct device *dev,
613 struct device_attribute *attr,
614 const char *buf, size_t count)
615 {
616 struct test_batched_req *req;
617 int rc;
618 u8 i;
619
620 mutex_lock(&test_fw_mutex);
621
622 test_fw_config->reqs =
623 vzalloc(array3_size(sizeof(struct test_batched_req),
624 test_fw_config->num_requests, 2));
625 if (!test_fw_config->reqs) {
626 rc = -ENOMEM;
627 goto out_unlock;
628 }
629
630 pr_info("batched sync firmware loading '%s' %u times\n",
631 test_fw_config->name, test_fw_config->num_requests);
632
633 for (i = 0; i < test_fw_config->num_requests; i++) {
634 req = &test_fw_config->reqs[i];
635 req->fw = NULL;
636 req->idx = i;
637 req->name = test_fw_config->name;
638 req->dev = dev;
639 init_completion(&req->completion);
640 req->task = kthread_run(test_fw_run_batch_request, req,
641 "%s-%u", KBUILD_MODNAME, req->idx);
642 if (!req->task || IS_ERR(req->task)) {
643 pr_err("Setting up thread %u failed\n", req->idx);
644 req->task = NULL;
645 rc = -ENOMEM;
646 goto out_bail;
647 }
648 }
649
650 rc = count;
651
652 /*
653 * We require an explicit release to delay calling release_firmware(),
654 * which improves our chances of forcing a batched request. If we
655 * instead called release_firmware() right away then a successful
656 * firmware request might miss the opportunity to become a batched
657 * request.
658 */
659
660 out_bail:
661 for (i = 0; i < test_fw_config->num_requests; i++) {
662 req = &test_fw_config->reqs[i];
663 if (req->task || req->sent)
664 wait_for_completion(&req->completion);
665 }
666
667 /* Override any worker error if we had a general setup error */
668 if (rc < 0)
669 test_fw_config->test_result = rc;
670
671 out_unlock:
672 mutex_unlock(&test_fw_mutex);
673
674 return rc;
675 }
676 static DEVICE_ATTR_WO(trigger_batched_requests);
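/*
 * A sketch of a full batched sync run from userspace (run from the
 * test_firmware sysfs directory, path assumed): after configuring the knobs
 * above, write anything to trigger_batched_requests, check test_result
 * (0 on success, otherwise the first error seen), and finally write to
 * release_all_firmware to drop the cached requests:
 *
 *	echo 1 > trigger_batched_requests
 *	cat test_result
 *	echo 1 > release_all_firmware
 */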
677
678 /*
679 * We wait for each callback to return with the lock held, no need to lock here
680 */
681 static void trigger_batched_cb(const struct firmware *fw, void *context)
682 {
683 struct test_batched_req *req = context;
684
685 if (!req) {
686 test_fw_config->test_result = -EINVAL;
687 return;
688 }
689
690 /* forces *some* batched requests to queue up */
691 if (!req->idx)
692 ssleep(2);
693
694 req->fw = fw;
695
696 /*
697 * Unfortunately the firmware API gives us nothing other than a null FW
698 * if the firmware was not found on async requests. Best we can do is
699 * just assume -ENOENT. A better API would pass the actual return
700 * value to the callback.
701 */
702 if (!fw && !test_fw_config->test_result)
703 test_fw_config->test_result = -ENOENT;
704
705 complete(&req->completion);
706 }
707
708 static
709 ssize_t trigger_batched_requests_async_store(struct device *dev,
710 struct device_attribute *attr,
711 const char *buf, size_t count)
712 {
713 struct test_batched_req *req;
714 bool send_uevent;
715 int rc;
716 u8 i;
717
718 mutex_lock(&test_fw_mutex);
719
720 test_fw_config->reqs =
721 vzalloc(array3_size(sizeof(struct test_batched_req),
722 test_fw_config->num_requests, 2));
723 if (!test_fw_config->reqs) {
724 rc = -ENOMEM;
725 goto out;
726 }
727
728 pr_info("batched loading '%s' custom fallback mechanism %u times\n",
729 test_fw_config->name, test_fw_config->num_requests);
730
731 send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
732 FW_ACTION_NOHOTPLUG;
733
734 for (i = 0; i < test_fw_config->num_requests; i++) {
735 req = &test_fw_config->reqs[i];
736 req->name = test_fw_config->name;
737 req->fw = NULL;
738 req->idx = i;
739 init_completion(&req->completion);
740 rc = request_firmware_nowait(THIS_MODULE, send_uevent,
741 req->name,
742 dev, GFP_KERNEL, req,
743 trigger_batched_cb);
744 if (rc) {
745 pr_info("#%u: batched async load failed setup: %d\n",
746 i, rc);
747 req->rc = rc;
748 goto out_bail;
749 } else
750 req->sent = true;
751 }
752
753 rc = count;
754
755 out_bail:
756
757 /*
758 * We require an explicit release to delay calling release_firmware(),
759 * which improves our chances of forcing a batched request. If we
760 * instead called release_firmware() right away then a successful
761 * firmware request might miss the opportunity to become a batched
762 * request.
763 */
764
765 for (i = 0; i < test_fw_config->num_requests; i++) {
766 req = &test_fw_config->reqs[i];
767 if (req->sent)
768 wait_for_completion(&req->completion);
769 }
770
771 /* Override any worker error if we had a general setup error */
772 if (rc < 0)
773 test_fw_config->test_result = rc;
774
775 out:
776 mutex_unlock(&test_fw_mutex);
777
778 return rc;
779 }
780 static DEVICE_ATTR_WO(trigger_batched_requests_async);
781
782 static ssize_t test_result_show(struct device *dev,
783 struct device_attribute *attr,
784 char *buf)
785 {
786 return test_dev_config_show_int(buf, test_fw_config->test_result);
787 }
788 static DEVICE_ATTR_RO(test_result);
789
790 static ssize_t release_all_firmware_store(struct device *dev,
791 struct device_attribute *attr,
792 const char *buf, size_t count)
793 {
794 test_release_all_firmware();
795 return count;
796 }
797 static DEVICE_ATTR_WO(release_all_firmware);
798
799 static ssize_t read_firmware_show(struct device *dev,
800 struct device_attribute *attr,
801 char *buf)
802 {
803 struct test_batched_req *req;
804 u8 idx;
805 ssize_t rc = 0;
806
807 mutex_lock(&test_fw_mutex);
808
809 idx = test_fw_config->read_fw_idx;
810 if (idx >= test_fw_config->num_requests) {
811 rc = -ERANGE;
812 goto out;
813 }
814
815 if (!test_fw_config->reqs) {
816 rc = -EINVAL;
817 goto out;
818 }
819
820 req = &test_fw_config->reqs[idx];
821 if (!req->fw) {
822 pr_err("#%u: failed to async load firmware\n", idx);
823 rc = -ENOENT;
824 goto out;
825 }
826
827 pr_info("#%u: loaded %zu\n", idx, req->fw->size);
828
829 if (req->fw->size > PAGE_SIZE) {
830 pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
831 rc = -EINVAL;
832 goto out;
833 }
834 memcpy(buf, req->fw->data, req->fw->size);
835
836 rc = req->fw->size;
837 out:
838 mutex_unlock(&test_fw_mutex);
839
840 return rc;
841 }
842 static DEVICE_ATTR_RO(read_firmware);
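/*
 * Example (run from the test_firmware sysfs directory, path assumed): after a
 * batched run, a specific request's firmware can be read back for
 * verification by selecting it first:
 *
 *	echo 2 > config_read_fw_idx
 *	cat read_firmware > /tmp/fw-copy.bin
 *
 * Note that read_firmware is limited to firmware files of at most PAGE_SIZE,
 * as enforced above.
 */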
843
844 #define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
845
846 static struct attribute *test_dev_attrs[] = {
847 TEST_FW_DEV_ATTR(reset),
848
849 TEST_FW_DEV_ATTR(config),
850 TEST_FW_DEV_ATTR(config_name),
851 TEST_FW_DEV_ATTR(config_num_requests),
852 TEST_FW_DEV_ATTR(config_sync_direct),
853 TEST_FW_DEV_ATTR(config_send_uevent),
854 TEST_FW_DEV_ATTR(config_read_fw_idx),
855
856 /* These don't use the config at all - they could be ported! */
857 TEST_FW_DEV_ATTR(trigger_request),
858 TEST_FW_DEV_ATTR(trigger_async_request),
859 TEST_FW_DEV_ATTR(trigger_custom_fallback),
860
861 /* These use the config and can use the test_result */
862 TEST_FW_DEV_ATTR(trigger_batched_requests),
863 TEST_FW_DEV_ATTR(trigger_batched_requests_async),
864
865 TEST_FW_DEV_ATTR(release_all_firmware),
866 TEST_FW_DEV_ATTR(test_result),
867 TEST_FW_DEV_ATTR(read_firmware),
868 NULL,
869 };
870
871 ATTRIBUTE_GROUPS(test_dev);
872
873 static struct miscdevice test_fw_misc_device = {
874 .minor = MISC_DYNAMIC_MINOR,
875 .name = "test_firmware",
876 .fops = &test_fw_fops,
877 .groups = test_dev_groups,
878 };
879
880 static int __init test_firmware_init(void)
881 {
882 int rc;
883
884 test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
885 if (!test_fw_config)
886 return -ENOMEM;
887
888 rc = __test_firmware_config_init();
889 if (rc) {
890 kfree(test_fw_config);
891 return rc;
892 }
891
892 rc = misc_register(&test_fw_misc_device);
893 if (rc) {
894 kfree(test_fw_config);
895 pr_err("could not register misc device: %d\n", rc);
896 return rc;
897 }
898
899 pr_warn("interface ready\n");
900
901 return 0;
902 }
903
904 module_init(test_firmware_init);
905
906 static void __exit test_firmware_exit(void)
907 {
908 mutex_lock(&test_fw_mutex);
909 release_firmware(test_firmware);
910 misc_deregister(&test_fw_misc_device);
911 __test_firmware_config_free();
912 kfree(test_fw_config);
913 mutex_unlock(&test_fw_mutex);
914
915 pr_warn("removed interface\n");
916 }
917
918 module_exit(test_firmware_exit);
919
920 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
921 MODULE_LICENSE("GPL");