// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

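/*
 * An illustrative sketch of the resulting output format: with the
 * default perf_type of "rcu", PERFOUT_STRING("Test complete") logs
 * the line "rcu-perf: Test complete", and VERBOSE_PERFOUT_ERRSTRING()
 * additionally prefixes its message with "!!!" so that error lines
 * stand out when scanning the console log.
 */
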
/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.  Concrete
 * boot-line examples appear below.
 */

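/*
 * For example (hypothetical boot command lines corresponding to the
 * three cases above, assuming rcuperf is built in):
 *
 *	nr_cpus=8				(8 readers and 8 writers)
 *	nr_cpus=8 rcuperf.nreaders=0		(update-only: 8 writers)
 *	nr_cpus=8 rcuperf.nwriters=0		(read-only: 8 readers)
 */
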
#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() perf test?");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

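/*
 * Note that the measurement loops below go only through cur_ops, so a
 * single code path covers every flavor: rcu_perf_init() compares the
 * perf_type module parameter against each entry's ->name and points
 * cur_ops at the match.  For example, perf_type=srcu dispatches
 * ->sync() to srcu_perf_synchronize() and thus synchronize_srcu().
 */
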
/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

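/*
 * A note on the gp_async throttling design: each writer (pinned to
 * its CPU) may have at most gp_async_max callbacks outstanding.  The
 * per-CPU n_async_inflight counter is incremented when a callback is
 * posted via cur_ops->async() and decremented above when it fires;
 * once the limit is reached, the writer falls back to
 * cur_ops->gp_barrier() to drain its backlog before retrying.
 */
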
/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_gp_test_finished,
					  b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

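/*
 * Worked example, assuming a hypothetical 8-CPU system:
 *
 *	compute_real(4)   == 4	(non-negative values pass through)
 *	compute_real(-1)  == 8	(8 + 1 + (-1): one thread per CPU)
 *	compute_real(-3)  == 6	(8 + 1 + (-3): two fewer than CPUs)
 *	compute_real(-20) == 1	(8 + 1 + (-20) < 1, clamped to one)
 */
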
/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() performance tests: Start a kfree_rcu() loop on all CPUs,
 * run it for a number of iterations, and measure the total time and
 * number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_perf_thread_started;
static atomic_t n_kfree_perf_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

static int
kfree_perf_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_PERFOUT_STRING("kfree_perf_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_perf_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		/* Take a memory baseline first, then keep a running average. */
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_perf_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuperf_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_perf_thread");
	return 0;
}

static void
kfree_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_perf_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_kfree_perf_thread_ended) >=
			   kfree_nrealthreads);
	} while (atomic_read(&n_kfree_perf_thread_ended) < kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_perf_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_perf_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_perf_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_perf_cleanup();
	return firsterr;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_perf_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);