1 /*
2 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 *
17 */
18 #include <linux/kernel.h>
19 #include <linux/blkdev.h>
20 #include <linux/blktrace_api.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/time.h>
28 #include <linux/uaccess.h>
29 #include <linux/list.h>
30 #include <linux/blk-cgroup.h>
31
32 #include "../../block/blk.h"
33
34 #include <trace/events/block.h>
35
36 #include "trace_output.h"
37
38 #ifdef CONFIG_BLK_DEV_IO_TRACE
39
40 static unsigned int blktrace_seq __read_mostly = 1;
41
42 static struct trace_array *blk_tr;
43 static bool blk_tracer_enabled __read_mostly;
44
45 static LIST_HEAD(running_trace_list);
46 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
47
48 /* Select an alternative, minimalistic output format instead of the original one */
49 #define TRACE_BLK_OPT_CLASSIC 0x1
50 #define TRACE_BLK_OPT_CGROUP 0x2
51 #define TRACE_BLK_OPT_CGNAME 0x4
52
53 static struct tracer_opt blk_tracer_opts[] = {
54 /* The minimalistic output is disabled by default */
55 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
56 #ifdef CONFIG_BLK_CGROUP
57 { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
58 { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
59 #endif
60 { }
61 };
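/*
 * Illustrative note (assumption, not part of the original source): when
 * the "blk" tracer is current, these options can be toggled through the
 * tracing debugfs instance, e.g.:
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo blk_classic > /sys/kernel/debug/tracing/trace_options
 */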
62
63 static struct tracer_flags blk_tracer_flags = {
64 .val = 0,
65 .opts = blk_tracer_opts,
66 };
67
68 /* Global reference count of probes */
69 static DEFINE_MUTEX(blk_probe_mutex);
70 static int blk_probes_ref;
71
72 static void blk_register_tracepoints(void);
73 static void blk_unregister_tracepoints(void);
74
75 /*
76 * Send out a notify message.
77 */
78 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
79 const void *data, size_t len,
80 union kernfs_node_id *cgid)
81 {
82 struct blk_io_trace *t;
83 struct ring_buffer_event *event = NULL;
84 struct ring_buffer *buffer = NULL;
85 int pc = 0;
86 int cpu = smp_processor_id();
87 bool blk_tracer = blk_tracer_enabled;
88 ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
89
90 if (blk_tracer) {
91 buffer = blk_tr->trace_buffer.buffer;
92 pc = preempt_count();
93 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
94 sizeof(*t) + len + cgid_len,
95 0, pc);
96 if (!event)
97 return;
98 t = ring_buffer_event_data(event);
99 goto record_it;
100 }
101
102 if (!bt->rchan)
103 return;
104
105 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
106 if (t) {
107 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
108 t->time = ktime_to_ns(ktime_get());
109 record_it:
110 t->device = bt->dev;
111 t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
112 t->pid = pid;
113 t->cpu = cpu;
114 t->pdu_len = len + cgid_len;
115 if (cgid)
116 memcpy((void *)t + sizeof(*t), cgid, cgid_len);
117 memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
118
119 if (blk_tracer)
120 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
121 }
122 }
123
124 /*
125 * Send out a notify for this process, if we haven't done so since a trace
126 * started
127 */
128 static void trace_note_tsk(struct task_struct *tsk)
129 {
130 unsigned long flags;
131 struct blk_trace *bt;
132
133 tsk->btrace_seq = blktrace_seq;
134 spin_lock_irqsave(&running_trace_lock, flags);
135 list_for_each_entry(bt, &running_trace_list, running_list) {
136 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
137 sizeof(tsk->comm), NULL);
138 }
139 spin_unlock_irqrestore(&running_trace_lock, flags);
140 }
141
142 static void trace_note_time(struct blk_trace *bt)
143 {
144 struct timespec64 now;
145 unsigned long flags;
146 u32 words[2];
147
148 /* need to check user space to see if this breaks in y2038 or y2106 */
149 ktime_get_real_ts64(&now);
150 words[0] = (u32)now.tv_sec;
151 words[1] = now.tv_nsec;
152
153 local_irq_save(flags);
154 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
155 local_irq_restore(flags);
156 }
157
158 void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
159 const char *fmt, ...)
160 {
161 int n;
162 va_list args;
163 unsigned long flags;
164 char *buf;
165
166 if (unlikely(bt->trace_state != Blktrace_running &&
167 !blk_tracer_enabled))
168 return;
169
170 /*
171 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
172 * message to the trace.
173 */
174 if (!(bt->act_mask & BLK_TC_NOTIFY))
175 return;
176
177 local_irq_save(flags);
178 buf = this_cpu_ptr(bt->msg_data);
179 va_start(args, fmt);
180 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
181 va_end(args);
182
183 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
184 blkcg = NULL;
185 #ifdef CONFIG_BLK_CGROUP
186 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
187 blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
188 #else
189 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
190 #endif
191 local_irq_restore(flags);
192 }
193 EXPORT_SYMBOL_GPL(__trace_note_message);
194
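/*
 * Returns 1 when an event should be dropped: its categories miss the
 * configured action mask, its (non-zero) sector falls outside the
 * [start_lba, end_lba] range, or it was issued by a pid other than the
 * one being traced. Returns 0 when the event should be logged.
 */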
195 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
196 pid_t pid)
197 {
198 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
199 return 1;
200 if (sector && (sector < bt->start_lba || sector > bt->end_lba))
201 return 1;
202 if (bt->pid && pid != bt->pid)
203 return 1;
204
205 return 0;
206 }
207
208 /*
209 * Data direction bit lookup
210 */
211 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
212 BLK_TC_ACT(BLK_TC_WRITE) };
213
214 #define BLK_TC_RAHEAD BLK_TC_AHEAD
215 #define BLK_TC_PREFLUSH BLK_TC_FLUSH
216
217 /* The ilog2() calls are constant-folded away at compile time */
218 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
219 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
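/*
 * Worked example: MASK_TC_BIT(op_flags, SYNC) isolates the REQ_SYNC bit
 * (bit __REQ_SYNC) and shifts it left by
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC, landing it exactly on
 * the BLK_TC_ACT(BLK_TC_SYNC) position. Both bit positions are
 * compile-time constants, so each use below costs one AND plus one
 * constant shift.
 */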
220
221 /*
222 * The worker for the various blk_add_trace*() types. Fills out a
223 * blk_io_trace structure and places it in a per-cpu subbuffer.
224 */
225 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
226 int op, int op_flags, u32 what, int error, int pdu_len,
227 void *pdu_data, union kernfs_node_id *cgid)
228 {
229 struct task_struct *tsk = current;
230 struct ring_buffer_event *event = NULL;
231 struct ring_buffer *buffer = NULL;
232 struct blk_io_trace *t;
233 unsigned long flags = 0;
234 unsigned long *sequence;
235 pid_t pid;
236 int cpu, pc = 0;
237 bool blk_tracer = blk_tracer_enabled;
238 ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
239
240 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
241 return;
242
243 what |= ddir_act[op_is_write(op) ? WRITE : READ];
244 what |= MASK_TC_BIT(op_flags, SYNC);
245 what |= MASK_TC_BIT(op_flags, RAHEAD);
246 what |= MASK_TC_BIT(op_flags, META);
247 what |= MASK_TC_BIT(op_flags, PREFLUSH);
248 what |= MASK_TC_BIT(op_flags, FUA);
249 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
250 what |= BLK_TC_ACT(BLK_TC_DISCARD);
251 if (op == REQ_OP_FLUSH)
252 what |= BLK_TC_ACT(BLK_TC_FLUSH);
253 if (cgid)
254 what |= __BLK_TA_CGROUP;
255
256 pid = tsk->pid;
257 if (act_log_check(bt, what, sector, pid))
258 return;
259 cpu = raw_smp_processor_id();
260
261 if (blk_tracer) {
262 tracing_record_cmdline(current);
263
264 buffer = blk_tr->trace_buffer.buffer;
265 pc = preempt_count();
266 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
267 sizeof(*t) + pdu_len + cgid_len,
268 0, pc);
269 if (!event)
270 return;
271 t = ring_buffer_event_data(event);
272 goto record_it;
273 }
274
275 if (unlikely(tsk->btrace_seq != blktrace_seq))
276 trace_note_tsk(tsk);
277
278 /*
279 * A word about the locking here - we disable interrupts to reserve
280 * some space in the relay per-cpu buffer, to prevent an irq
281 * from coming in and stepping on our toes.
282 */
283 local_irq_save(flags);
284 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
285 if (t) {
286 sequence = per_cpu_ptr(bt->sequence, cpu);
287
288 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
289 t->sequence = ++(*sequence);
290 t->time = ktime_to_ns(ktime_get());
291 record_it:
292 /*
293 * These two are not needed in ftrace as they are in the
294 * generic trace_entry, filled by tracing_generic_entry_update,
295 * but for the trace_event->bin() synthesizer benefit we do it
296 * here too.
297 */
298 t->cpu = cpu;
299 t->pid = pid;
300
301 t->sector = sector;
302 t->bytes = bytes;
303 t->action = what;
304 t->device = bt->dev;
305 t->error = error;
306 t->pdu_len = pdu_len + cgid_len;
307
308 if (cgid_len)
309 memcpy((void *)t + sizeof(*t), cgid, cgid_len);
310 if (pdu_len)
311 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
312
313 if (blk_tracer) {
314 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
315 return;
316 }
317 }
318
319 local_irq_restore(flags);
320 }
321
322 static void blk_trace_free(struct blk_trace *bt)
323 {
324 debugfs_remove(bt->msg_file);
325 debugfs_remove(bt->dropped_file);
326 relay_close(bt->rchan);
327 debugfs_remove(bt->dir);
328 free_percpu(bt->sequence);
329 free_percpu(bt->msg_data);
330 kfree(bt);
331 }
332
333 static void get_probe_ref(void)
334 {
335 mutex_lock(&blk_probe_mutex);
336 if (++blk_probes_ref == 1)
337 blk_register_tracepoints();
338 mutex_unlock(&blk_probe_mutex);
339 }
340
341 static void put_probe_ref(void)
342 {
343 mutex_lock(&blk_probe_mutex);
344 if (!--blk_probes_ref)
345 blk_unregister_tracepoints();
346 mutex_unlock(&blk_probe_mutex);
347 }
348
349 static void blk_trace_cleanup(struct blk_trace *bt)
350 {
351 blk_trace_free(bt);
352 put_probe_ref();
353 }
354
355 static int __blk_trace_remove(struct request_queue *q)
356 {
357 struct blk_trace *bt;
358
359 bt = xchg(&q->blk_trace, NULL);
360 if (!bt)
361 return -EINVAL;
362
363 if (bt->trace_state != Blktrace_running)
364 blk_trace_cleanup(bt);
365
366 return 0;
367 }
368
369 int blk_trace_remove(struct request_queue *q)
370 {
371 int ret;
372
373 mutex_lock(&q->blk_trace_mutex);
374 ret = __blk_trace_remove(q);
375 mutex_unlock(&q->blk_trace_mutex);
376
377 return ret;
378 }
379 EXPORT_SYMBOL_GPL(blk_trace_remove);
380
381 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
382 size_t count, loff_t *ppos)
383 {
384 struct blk_trace *bt = filp->private_data;
385 char buf[16];
386
387 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
388
389 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
390 }
391
392 static const struct file_operations blk_dropped_fops = {
393 .owner = THIS_MODULE,
394 .open = simple_open,
395 .read = blk_dropped_read,
396 .llseek = default_llseek,
397 };
398
399 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
400 size_t count, loff_t *ppos)
401 {
402 char *msg;
403 struct blk_trace *bt;
404
405 if (count >= BLK_TN_MAX_MSG)
406 return -EINVAL;
407
408 msg = memdup_user_nul(buffer, count);
409 if (IS_ERR(msg))
410 return PTR_ERR(msg);
411
412 bt = filp->private_data;
413 __trace_note_message(bt, NULL, "%s", msg);
414 kfree(msg);
415
416 return count;
417 }
418
419 static const struct file_operations blk_msg_fops = {
420 .owner = THIS_MODULE,
421 .open = simple_open,
422 .write = blk_msg_write,
423 .llseek = noop_llseek,
424 };
425
426 /*
427 * Keep track of how many times we encountered a full subbuffer, to help
428 * the user space app tell how many events were lost.
429 */
430 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
431 void *prev_subbuf, size_t prev_padding)
432 {
433 struct blk_trace *bt;
434
435 if (!relay_buf_full(buf))
436 return 1;
437
438 bt = buf->chan->private_data;
439 atomic_inc(&bt->dropped);
440 return 0;
441 }
442
443 static int blk_remove_buf_file_callback(struct dentry *dentry)
444 {
445 debugfs_remove(dentry);
446
447 return 0;
448 }
449
450 static struct dentry *blk_create_buf_file_callback(const char *filename,
451 struct dentry *parent,
452 umode_t mode,
453 struct rchan_buf *buf,
454 int *is_global)
455 {
456 return debugfs_create_file(filename, mode, parent, buf,
457 &relay_file_operations);
458 }
459
460 static struct rchan_callbacks blk_relay_callbacks = {
461 .subbuf_start = blk_subbuf_start_callback,
462 .create_buf_file = blk_create_buf_file_callback,
463 .remove_buf_file = blk_remove_buf_file_callback,
464 };
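/*
 * Resulting debugfs layout (sketch, assuming the usual mount point):
 * relay_open("trace", ...) in do_blk_trace_setup() invokes
 * blk_create_buf_file_callback() once per CPU, so a session for e.g.
 * sda shows up as /sys/kernel/debug/block/sda/trace0 ... traceN next
 * to the "dropped" and "msg" control files.
 */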
465
466 static void blk_trace_setup_lba(struct blk_trace *bt,
467 struct block_device *bdev)
468 {
469 struct hd_struct *part = NULL;
470
471 if (bdev)
472 part = bdev->bd_part;
473
474 if (part) {
475 bt->start_lba = part->start_sect;
476 bt->end_lba = part->start_sect + part->nr_sects;
477 } else {
478 bt->start_lba = 0;
479 bt->end_lba = -1ULL;
480 }
481 }
482
483 /*
484 * Setup everything required to start tracing
485 */
486 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
487 struct block_device *bdev,
488 struct blk_user_trace_setup *buts)
489 {
490 struct blk_trace *bt = NULL;
491 struct dentry *dir = NULL;
492 int ret;
493
494 if (!buts->buf_size || !buts->buf_nr)
495 return -EINVAL;
496
497 if (!blk_debugfs_root)
498 return -ENOENT;
499
500 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
501 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
502
503 /*
504 * some device names contain slashes in their paths - convert them
505 * to underscores so the debugfs directory name works as expected
506 */
507 strreplace(buts->name, '/', '_');
508
509 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
510 if (!bt)
511 return -ENOMEM;
512
513 ret = -ENOMEM;
514 bt->sequence = alloc_percpu(unsigned long);
515 if (!bt->sequence)
516 goto err;
517
518 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
519 if (!bt->msg_data)
520 goto err;
521
522 ret = -ENOENT;
523
524 dir = debugfs_lookup(buts->name, blk_debugfs_root);
525 if (!dir)
526 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
527 if (!dir)
528 goto err;
529
530 bt->dev = dev;
531 atomic_set(&bt->dropped, 0);
532 INIT_LIST_HEAD(&bt->running_list);
533
534 ret = -EIO;
535 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
536 &blk_dropped_fops);
537 if (!bt->dropped_file)
538 goto err;
539
540 bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
541 if (!bt->msg_file)
542 goto err;
543
544 bt->rchan = relay_open("trace", dir, buts->buf_size,
545 buts->buf_nr, &blk_relay_callbacks, bt);
546 if (!bt->rchan)
547 goto err;
548
549 bt->act_mask = buts->act_mask;
550 if (!bt->act_mask)
551 bt->act_mask = (u16) -1;
552
553 blk_trace_setup_lba(bt, bdev);
554
555 /* overwrite with user settings */
556 if (buts->start_lba)
557 bt->start_lba = buts->start_lba;
558 if (buts->end_lba)
559 bt->end_lba = buts->end_lba;
560
561 bt->pid = buts->pid;
562 bt->trace_state = Blktrace_setup;
563
564 ret = -EBUSY;
565 if (cmpxchg(&q->blk_trace, NULL, bt))
566 goto err;
567
568 get_probe_ref();
569
570 ret = 0;
571 err:
572 if (dir && !bt->dir)
573 dput(dir);
574 if (ret)
575 blk_trace_free(bt);
576 return ret;
577 }
578
579 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
580 struct block_device *bdev, char __user *arg)
581 {
582 struct blk_user_trace_setup buts;
583 int ret;
584
585 ret = copy_from_user(&buts, arg, sizeof(buts));
586 if (ret)
587 return -EFAULT;
588
589 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
590 if (ret)
591 return ret;
592
593 if (copy_to_user(arg, &buts, sizeof(buts))) {
594 __blk_trace_remove(q);
595 return -EFAULT;
596 }
597 return 0;
598 }
599
600 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
601 struct block_device *bdev,
602 char __user *arg)
603 {
604 int ret;
605
606 mutex_lock(&q->blk_trace_mutex);
607 ret = __blk_trace_setup(q, name, dev, bdev, arg);
608 mutex_unlock(&q->blk_trace_mutex);
609
610 return ret;
611 }
612 EXPORT_SYMBOL_GPL(blk_trace_setup);
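/*
 * A minimal user-space sketch of this ioctl sequence (illustrative
 * only; assumes <linux/blktrace_api.h>, error handling omitted). This
 * mirrors how the blktrace(8) utility drives the interface:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	[fills in buts.name]
 *	ioctl(fd, BLKTRACESTART);
 *	... read /sys/kernel/debug/block/<buts.name>/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */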
613
614 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
615 static int compat_blk_trace_setup(struct request_queue *q, char *name,
616 dev_t dev, struct block_device *bdev,
617 char __user *arg)
618 {
619 struct blk_user_trace_setup buts;
620 struct compat_blk_user_trace_setup cbuts;
621 int ret;
622
623 if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
624 return -EFAULT;
625
626 buts = (struct blk_user_trace_setup) {
627 .act_mask = cbuts.act_mask,
628 .buf_size = cbuts.buf_size,
629 .buf_nr = cbuts.buf_nr,
630 .start_lba = cbuts.start_lba,
631 .end_lba = cbuts.end_lba,
632 .pid = cbuts.pid,
633 };
634
635 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
636 if (ret)
637 return ret;
638
639 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
640 __blk_trace_remove(q);
641 return -EFAULT;
642 }
643
644 return 0;
645 }
646 #endif
647
648 static int __blk_trace_startstop(struct request_queue *q, int start)
649 {
650 int ret;
651 struct blk_trace *bt = q->blk_trace;
652
653 if (bt == NULL)
654 return -EINVAL;
655
656 /*
657 * For starting a trace, we can transition from a setup or stopped
658 * trace. For stopping a trace, the state must be running.
659 */
660 ret = -EINVAL;
661 if (start) {
662 if (bt->trace_state == Blktrace_setup ||
663 bt->trace_state == Blktrace_stopped) {
664 blktrace_seq++;
665 smp_mb();
666 bt->trace_state = Blktrace_running;
667 spin_lock_irq(&running_trace_lock);
668 list_add(&bt->running_list, &running_trace_list);
669 spin_unlock_irq(&running_trace_lock);
670
671 trace_note_time(bt);
672 ret = 0;
673 }
674 } else {
675 if (bt->trace_state == Blktrace_running) {
676 bt->trace_state = Blktrace_stopped;
677 spin_lock_irq(&running_trace_lock);
678 list_del_init(&bt->running_list);
679 spin_unlock_irq(&running_trace_lock);
680 relay_flush(bt->rchan);
681 ret = 0;
682 }
683 }
684
685 return ret;
686 }
687
688 int blk_trace_startstop(struct request_queue *q, int start)
689 {
690 int ret;
691
692 mutex_lock(&q->blk_trace_mutex);
693 ret = __blk_trace_startstop(q, start);
694 mutex_unlock(&q->blk_trace_mutex);
695
696 return ret;
697 }
698 EXPORT_SYMBOL_GPL(blk_trace_startstop);
699
700 /*
701 * When reading or writing the blktrace sysfs files, the references to the
702 * opened sysfs or device files should prevent the underlying block device
703 * from being removed. So no further delete protection is really needed.
704 */
705
706 /**
707 * blk_trace_ioctl: - handle the ioctls associated with tracing
708 * @bdev: the block device
709 * @cmd: the ioctl cmd
710 * @arg: the argument data, if any
711 *
712 **/
713 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
714 {
715 struct request_queue *q;
716 int ret, start = 0;
717 char b[BDEVNAME_SIZE];
718
719 q = bdev_get_queue(bdev);
720 if (!q)
721 return -ENXIO;
722
723 mutex_lock(&q->blk_trace_mutex);
724
725 switch (cmd) {
726 case BLKTRACESETUP:
727 bdevname(bdev, b);
728 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
729 break;
730 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
731 case BLKTRACESETUP32:
732 bdevname(bdev, b);
733 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
734 break;
735 #endif
736 case BLKTRACESTART:
737 start = 1; /* fall through */
738 case BLKTRACESTOP:
739 ret = __blk_trace_startstop(q, start);
740 break;
741 case BLKTRACETEARDOWN:
742 ret = __blk_trace_remove(q);
743 break;
744 default:
745 ret = -ENOTTY;
746 break;
747 }
748
749 mutex_unlock(&q->blk_trace_mutex);
750 return ret;
751 }
752
753 /**
754 * blk_trace_shutdown: - stop and cleanup trace structures
755 * @q: the request queue associated with the device
756 *
757 **/
758 void blk_trace_shutdown(struct request_queue *q)
759 {
760 mutex_lock(&q->blk_trace_mutex);
761
762 if (q->blk_trace) {
763 __blk_trace_startstop(q, 0);
764 __blk_trace_remove(q);
765 }
766
767 mutex_unlock(&q->blk_trace_mutex);
768 }
769
770 #ifdef CONFIG_BLK_CGROUP
771 static union kernfs_node_id *
772 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
773 {
774 struct blk_trace *bt = q->blk_trace;
775
776 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
777 return NULL;
778
779 if (!bio->bi_css)
780 return NULL;
781 return cgroup_get_kernfs_id(bio->bi_css->cgroup);
782 }
783 #else
784 static union kernfs_node_id *
785 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
786 {
787 return NULL;
788 }
789 #endif
790
791 static union kernfs_node_id *
792 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
793 {
794 if (!rq->bio)
795 return NULL;
796 /* Use the first bio */
797 return blk_trace_bio_get_cgid(q, rq->bio);
798 }
799
800 /*
801 * blktrace probes
802 */
803
804 /**
805 * blk_add_trace_rq - Add a trace for a request oriented action
806 * @rq: the source request
807 * @error: return status to log
808 * @nr_bytes: number of completed bytes
809 * @what: the action
810 * @cgid: the cgroup info
811 *
812 * Description:
813 * Records an action against a request. Will log the bio offset + size.
814 *
815 **/
816 static void blk_add_trace_rq(struct request *rq, int error,
817 unsigned int nr_bytes, u32 what,
818 union kernfs_node_id *cgid)
819 {
820 struct blk_trace *bt = rq->q->blk_trace;
821
822 if (likely(!bt))
823 return;
824
825 if (blk_rq_is_passthrough(rq))
826 what |= BLK_TC_ACT(BLK_TC_PC);
827 else
828 what |= BLK_TC_ACT(BLK_TC_FS);
829
830 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
831 rq->cmd_flags, what, error, 0, NULL, cgid);
832 }
833
834 static void blk_add_trace_rq_insert(void *ignore,
835 struct request_queue *q, struct request *rq)
836 {
837 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
838 blk_trace_request_get_cgid(q, rq));
839 }
840
841 static void blk_add_trace_rq_issue(void *ignore,
842 struct request_queue *q, struct request *rq)
843 {
844 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
845 blk_trace_request_get_cgid(q, rq));
846 }
847
848 static void blk_add_trace_rq_requeue(void *ignore,
849 struct request_queue *q,
850 struct request *rq)
851 {
852 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
853 blk_trace_request_get_cgid(q, rq));
854 }
855
856 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
857 int error, unsigned int nr_bytes)
858 {
859 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
860 blk_trace_request_get_cgid(rq->q, rq));
861 }
862
863 /**
864 * blk_add_trace_bio - Add a trace for a bio oriented action
865 * @q: queue the io is for
866 * @bio: the source bio
867 * @what: the action
868 * @error: error, if any
869 *
870 * Description:
871 * Records an action against a bio. Will log the bio offset + size.
872 *
873 **/
874 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
875 u32 what, int error)
876 {
877 struct blk_trace *bt = q->blk_trace;
878
879 if (likely(!bt))
880 return;
881
882 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
883 bio_op(bio), bio->bi_opf, what, error, 0, NULL,
884 blk_trace_bio_get_cgid(q, bio));
885 }
886
887 static void blk_add_trace_bio_bounce(void *ignore,
888 struct request_queue *q, struct bio *bio)
889 {
890 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
891 }
892
893 static void blk_add_trace_bio_complete(void *ignore,
894 struct request_queue *q, struct bio *bio,
895 int error)
896 {
897 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
898 }
899
900 static void blk_add_trace_bio_backmerge(void *ignore,
901 struct request_queue *q,
902 struct request *rq,
903 struct bio *bio)
904 {
905 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
906 }
907
908 static void blk_add_trace_bio_frontmerge(void *ignore,
909 struct request_queue *q,
910 struct request *rq,
911 struct bio *bio)
912 {
913 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
914 }
915
916 static void blk_add_trace_bio_queue(void *ignore,
917 struct request_queue *q, struct bio *bio)
918 {
919 blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
920 }
921
922 static void blk_add_trace_getrq(void *ignore,
923 struct request_queue *q,
924 struct bio *bio, int rw)
925 {
926 if (bio)
927 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
928 else {
929 struct blk_trace *bt = q->blk_trace;
930
931 if (bt)
932 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
933 NULL, NULL);
934 }
935 }
936
937
938 static void blk_add_trace_sleeprq(void *ignore,
939 struct request_queue *q,
940 struct bio *bio, int rw)
941 {
942 if (bio)
943 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
944 else {
945 struct blk_trace *bt = q->blk_trace;
946
947 if (bt)
948 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
949 0, 0, NULL, NULL);
950 }
951 }
952
953 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
954 {
955 struct blk_trace *bt = q->blk_trace;
956
957 if (bt)
958 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
959 }
960
961 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
962 unsigned int depth, bool explicit)
963 {
964 struct blk_trace *bt = q->blk_trace;
965
966 if (bt) {
967 __be64 rpdu = cpu_to_be64(depth);
968 u32 what;
969
970 if (explicit)
971 what = BLK_TA_UNPLUG_IO;
972 else
973 what = BLK_TA_UNPLUG_TIMER;
974
975 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
976 }
977 }
978
979 static void blk_add_trace_split(void *ignore,
980 struct request_queue *q, struct bio *bio,
981 unsigned int pdu)
982 {
983 struct blk_trace *bt = q->blk_trace;
984
985 if (bt) {
986 __be64 rpdu = cpu_to_be64(pdu);
987
988 __blk_add_trace(bt, bio->bi_iter.bi_sector,
989 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
990 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
991 &rpdu, blk_trace_bio_get_cgid(q, bio));
992 }
993 }
994
995 /**
996 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
997 * @ignore: trace callback data parameter (not used)
998 * @q: queue the io is for
999 * @bio: the source bio
1000 * @dev: target device
1001 * @from: source sector
1002 *
1003 * Description:
1004 * Device-mapper or raid targets sometimes need to split a bio because
1005 * it spans a stripe (or similar). Add a trace for that action.
1006 *
1007 **/
1008 static void blk_add_trace_bio_remap(void *ignore,
1009 struct request_queue *q, struct bio *bio,
1010 dev_t dev, sector_t from)
1011 {
1012 struct blk_trace *bt = q->blk_trace;
1013 struct blk_io_trace_remap r;
1014
1015 if (likely(!bt))
1016 return;
1017
1018 r.device_from = cpu_to_be32(dev);
1019 r.device_to = cpu_to_be32(bio_dev(bio));
1020 r.sector_from = cpu_to_be64(from);
1021
1022 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1023 bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
1024 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1025 }
1026
1027 /**
1028 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1029 * @ignore: trace callback data parameter (not used)
1030 * @q: queue the io is for
1031 * @rq: the source request
1032 * @dev: target device
1033 * @from: source sector
1034 *
1035 * Description:
1036 * Device mapper remaps requests to other devices.
1037 * Add a trace for that action.
1038 *
1039 **/
1040 static void blk_add_trace_rq_remap(void *ignore,
1041 struct request_queue *q,
1042 struct request *rq, dev_t dev,
1043 sector_t from)
1044 {
1045 struct blk_trace *bt = q->blk_trace;
1046 struct blk_io_trace_remap r;
1047
1048 if (likely(!bt))
1049 return;
1050
1051 r.device_from = cpu_to_be32(dev);
1052 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
1053 r.sector_from = cpu_to_be64(from);
1054
1055 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1056 rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1057 sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1058 }
1059
1060 /**
1061 * blk_add_driver_data - Add binary message with driver-specific data
1062 * @q: queue the io is for
1063 * @rq: io request
1064 * @data: driver-specific data
1065 * @len: length of driver-specific data
1066 *
1067 * Description:
1068 * Some drivers might want to write driver-specific data per request.
1069 *
1070 **/
1071 void blk_add_driver_data(struct request_queue *q,
1072 struct request *rq,
1073 void *data, size_t len)
1074 {
1075 struct blk_trace *bt = q->blk_trace;
1076
1077 if (likely(!bt))
1078 return;
1079
1080 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1081 BLK_TA_DRV_DATA, 0, len, data,
1082 blk_trace_request_get_cgid(q, rq));
1083 }
1084 EXPORT_SYMBOL_GPL(blk_add_driver_data);
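/*
 * Hedged usage sketch for the export above - the payload struct is
 * hypothetical, not taken from any driver. The data is recorded as a
 * BLK_TA_DRV_DATA event with the payload as its pdu:
 *
 *	struct my_drv_pdu {		[hypothetical]
 *		__u32 tag;
 *		__u32 hw_queue;
 *	} pdu = { .tag = rq->tag, .hw_queue = 0 };
 *
 *	blk_add_driver_data(q, rq, &pdu, sizeof(pdu));
 */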
1085
1086 static void blk_register_tracepoints(void)
1087 {
1088 int ret;
1089
1090 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1091 WARN_ON(ret);
1092 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1093 WARN_ON(ret);
1094 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1095 WARN_ON(ret);
1096 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1097 WARN_ON(ret);
1098 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1099 WARN_ON(ret);
1100 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1101 WARN_ON(ret);
1102 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1103 WARN_ON(ret);
1104 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1105 WARN_ON(ret);
1106 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1107 WARN_ON(ret);
1108 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1109 WARN_ON(ret);
1110 ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1111 WARN_ON(ret);
1112 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1113 WARN_ON(ret);
1114 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1115 WARN_ON(ret);
1116 ret = register_trace_block_split(blk_add_trace_split, NULL);
1117 WARN_ON(ret);
1118 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1119 WARN_ON(ret);
1120 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1121 WARN_ON(ret);
1122 }
1123
1124 static void blk_unregister_tracepoints(void)
1125 {
1126 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1127 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1128 unregister_trace_block_split(blk_add_trace_split, NULL);
1129 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1130 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1131 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1132 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1133 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1134 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1135 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1136 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1137 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1138 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1139 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1140 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1141 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1142
1143 tracepoint_synchronize_unregister();
1144 }
1145
1146 /*
1147 * struct blk_io_tracer formatting routines
1148 */
1149
1150 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1151 {
1152 int i = 0;
1153 int tc = t->action >> BLK_TC_SHIFT;
1154
1155 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1156 rwbs[i++] = 'N';
1157 goto out;
1158 }
1159
1160 if (tc & BLK_TC_FLUSH)
1161 rwbs[i++] = 'F';
1162
1163 if (tc & BLK_TC_DISCARD)
1164 rwbs[i++] = 'D';
1165 else if (tc & BLK_TC_WRITE)
1166 rwbs[i++] = 'W';
1167 else if (t->bytes)
1168 rwbs[i++] = 'R';
1169 else
1170 rwbs[i++] = 'N';
1171
1172 if (tc & BLK_TC_FUA)
1173 rwbs[i++] = 'F';
1174 if (tc & BLK_TC_AHEAD)
1175 rwbs[i++] = 'A';
1176 if (tc & BLK_TC_SYNC)
1177 rwbs[i++] = 'S';
1178 if (tc & BLK_TC_META)
1179 rwbs[i++] = 'M';
1180 out:
1181 rwbs[i] = '\0';
1182 }
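/*
 * Example: a synchronous FUA write carries BLK_TC_WRITE, BLK_TC_FUA and
 * BLK_TC_SYNC in the high bits of t->action, so the function above
 * renders it as "WFS".
 */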
1183
1184 static inline
1185 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1186 {
1187 return (const struct blk_io_trace *)ent;
1188 }
1189
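/*
 * Layout of one record as stored in the buffer, which the accessors
 * below walk:
 *
 *	struct blk_io_trace	(fixed header)
 *	union kernfs_node_id	(only when the event carries a cgid)
 *	pdu payload		(pdu_len minus the optional cgid)
 */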
1190 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1191 {
1192 return (void *)(te_blk_io_trace(ent) + 1) +
1193 (has_cg ? sizeof(union kernfs_node_id) : 0);
1194 }
1195
1196 static inline const void *cgid_start(const struct trace_entry *ent)
1197 {
1198 return (void *)(te_blk_io_trace(ent) + 1);
1199 }
1200
1201 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1202 {
1203 return te_blk_io_trace(ent)->pdu_len -
1204 (has_cg ? sizeof(union kernfs_node_id) : 0);
1205 }
1206
1207 static inline u32 t_action(const struct trace_entry *ent)
1208 {
1209 return te_blk_io_trace(ent)->action;
1210 }
1211
1212 static inline u32 t_bytes(const struct trace_entry *ent)
1213 {
1214 return te_blk_io_trace(ent)->bytes;
1215 }
1216
1217 static inline u32 t_sec(const struct trace_entry *ent)
1218 {
1219 return te_blk_io_trace(ent)->bytes >> 9;
1220 }
1221
1222 static inline unsigned long long t_sector(const struct trace_entry *ent)
1223 {
1224 return te_blk_io_trace(ent)->sector;
1225 }
1226
1227 static inline __u16 t_error(const struct trace_entry *ent)
1228 {
1229 return te_blk_io_trace(ent)->error;
1230 }
1231
1232 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1233 {
1234 const __u64 *val = pdu_start(ent, has_cg);
1235 return be64_to_cpu(*val);
1236 }
1237
1238 static void get_pdu_remap(const struct trace_entry *ent,
1239 struct blk_io_trace_remap *r, bool has_cg)
1240 {
1241 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1242 __u64 sector_from = __r->sector_from;
1243
1244 r->device_from = be32_to_cpu(__r->device_from);
1245 r->device_to = be32_to_cpu(__r->device_to);
1246 r->sector_from = be64_to_cpu(sector_from);
1247 }
1248
1249 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1250 bool has_cg);
1251
1252 static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1253 bool has_cg)
1254 {
1255 char rwbs[RWBS_LEN];
1256 unsigned long long ts = iter->ts;
1257 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1258 unsigned secs = (unsigned long)ts;
1259 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1260
1261 fill_rwbs(rwbs, t);
1262
1263 trace_seq_printf(&iter->seq,
1264 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1265 MAJOR(t->device), MINOR(t->device), iter->cpu,
1266 secs, nsec_rem, iter->ent->pid, act, rwbs);
1267 }
1268
1269 static void blk_log_action(struct trace_iterator *iter, const char *act,
1270 bool has_cg)
1271 {
1272 char rwbs[RWBS_LEN];
1273 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1274
1275 fill_rwbs(rwbs, t);
1276 if (has_cg) {
1277 const union kernfs_node_id *id = cgid_start(iter->ent);
1278
1279 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1280 char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1281
1282 cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1283 sizeof(blkcg_name_buf));
1284 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1285 MAJOR(t->device), MINOR(t->device),
1286 blkcg_name_buf, act, rwbs);
1287 } else
1288 trace_seq_printf(&iter->seq,
1289 "%3d,%-3d %x,%-x %2s %3s ",
1290 MAJOR(t->device), MINOR(t->device),
1291 id->ino, id->generation, act, rwbs);
1292 } else
1293 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1294 MAJOR(t->device), MINOR(t->device), act, rwbs);
1295 }
1296
1297 static void blk_log_dump_pdu(struct trace_seq *s,
1298 const struct trace_entry *ent, bool has_cg)
1299 {
1300 const unsigned char *pdu_buf;
1301 int pdu_len;
1302 int i, end;
1303
1304 pdu_buf = pdu_start(ent, has_cg);
1305 pdu_len = pdu_real_len(ent, has_cg);
1306
1307 if (!pdu_len)
1308 return;
1309
1310 /* find the last non-zero byte - trailing zeroes need not be printed */
1311 for (end = pdu_len - 1; end >= 0; end--)
1312 if (pdu_buf[end])
1313 break;
1314 end++;
1315
1316 trace_seq_putc(s, '(');
1317
1318 for (i = 0; i < pdu_len; i++) {
1319
1320 trace_seq_printf(s, "%s%02x",
1321 i == 0 ? "" : " ", pdu_buf[i]);
1322
1323 /*
1324 * stop when the rest is just zeroes and indicate so
1325 * with a ".." appended
1326 */
1327 if (i == end && end != pdu_len - 1) {
1328 trace_seq_puts(s, " ..) ");
1329 return;
1330 }
1331 }
1332
1333 trace_seq_puts(s, ") ");
1334 }
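/*
 * Example of the format above: a 4-byte pdu of { 0x12, 0x34, 0x00, 0x00 }
 * prints as "(12 34 ..) " - the run of trailing zero bytes is collapsed
 * into "..".
 */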
1335
1336 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1337 {
1338 char cmd[TASK_COMM_LEN];
1339
1340 trace_find_cmdline(ent->pid, cmd);
1341
1342 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1343 trace_seq_printf(s, "%u ", t_bytes(ent));
1344 blk_log_dump_pdu(s, ent, has_cg);
1345 trace_seq_printf(s, "[%s]\n", cmd);
1346 } else {
1347 if (t_sec(ent))
1348 trace_seq_printf(s, "%llu + %u [%s]\n",
1349 t_sector(ent), t_sec(ent), cmd);
1350 else
1351 trace_seq_printf(s, "[%s]\n", cmd);
1352 }
1353 }
1354
1355 static void blk_log_with_error(struct trace_seq *s,
1356 const struct trace_entry *ent, bool has_cg)
1357 {
1358 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1359 blk_log_dump_pdu(s, ent, has_cg);
1360 trace_seq_printf(s, "[%d]\n", t_error(ent));
1361 } else {
1362 if (t_sec(ent))
1363 trace_seq_printf(s, "%llu + %u [%d]\n",
1364 t_sector(ent),
1365 t_sec(ent), t_error(ent));
1366 else
1367 trace_seq_printf(s, "%llu [%d]\n",
1368 t_sector(ent), t_error(ent));
1369 }
1370 }
1371
1372 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1373 {
1374 struct blk_io_trace_remap r = { .device_from = 0, };
1375
1376 get_pdu_remap(ent, &r, has_cg);
1377 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1378 t_sector(ent), t_sec(ent),
1379 MAJOR(r.device_from), MINOR(r.device_from),
1380 (unsigned long long)r.sector_from);
1381 }
1382
1383 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1384 {
1385 char cmd[TASK_COMM_LEN];
1386
1387 trace_find_cmdline(ent->pid, cmd);
1388
1389 trace_seq_printf(s, "[%s]\n", cmd);
1390 }
1391
1392 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1393 {
1394 char cmd[TASK_COMM_LEN];
1395
1396 trace_find_cmdline(ent->pid, cmd);
1397
1398 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1399 }
1400
1401 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1402 {
1403 char cmd[TASK_COMM_LEN];
1404
1405 trace_find_cmdline(ent->pid, cmd);
1406
1407 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1408 get_pdu_int(ent, has_cg), cmd);
1409 }
1410
1411 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1412 bool has_cg)
1413 {
1414
1415 trace_seq_putmem(s, pdu_start(ent, has_cg),
1416 pdu_real_len(ent, has_cg));
1417 trace_seq_putc(s, '\n');
1418 }
1419
1420 /*
1421 * struct tracer operations
1422 */
1423
1424 static void blk_tracer_print_header(struct seq_file *m)
1425 {
1426 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1427 return;
1428 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
1429 "# | | | | | |\n");
1430 }
1431
1432 static void blk_tracer_start(struct trace_array *tr)
1433 {
1434 blk_tracer_enabled = true;
1435 }
1436
1437 static int blk_tracer_init(struct trace_array *tr)
1438 {
1439 blk_tr = tr;
1440 blk_tracer_start(tr);
1441 return 0;
1442 }
1443
1444 static void blk_tracer_stop(struct trace_array *tr)
1445 {
1446 blk_tracer_enabled = false;
1447 }
1448
1449 static void blk_tracer_reset(struct trace_array *tr)
1450 {
1451 blk_tracer_stop(tr);
1452 }
1453
1454 static const struct {
1455 const char *act[2];
1456 void (*print)(struct trace_seq *s, const struct trace_entry *ent,
1457 bool has_cg);
1458 } what2act[] = {
1459 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
1460 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
1461 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
1462 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
1463 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
1464 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
1465 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
1466 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
1467 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
1468 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
1469 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1470 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
1471 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
1472 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
1473 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
1474 };
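/*
 * The two strings per entry are the terse and verbose names for the
 * action; print_one_line() picks one via act[long_act], where long_act
 * reflects the "verbose" trace flag.
 */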
1475
1476 static enum print_line_t print_one_line(struct trace_iterator *iter,
1477 bool classic)
1478 {
1479 struct trace_array *tr = iter->tr;
1480 struct trace_seq *s = &iter->seq;
1481 const struct blk_io_trace *t;
1482 u16 what;
1483 bool long_act;
1484 blk_log_action_t *log_action;
1485 bool has_cg;
1486
1487 t = te_blk_io_trace(iter->ent);
1488 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1489 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1490 log_action = classic ? &blk_log_action_classic : &blk_log_action;
1491 has_cg = t->action & __BLK_TA_CGROUP;
1492
1493 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1494 log_action(iter, long_act ? "message" : "m", has_cg);
1495 blk_log_msg(s, iter->ent, has_cg);
1496 return trace_handle_return(s);
1497 }
1498
1499 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1500 trace_seq_printf(s, "Unknown action %x\n", what);
1501 else {
1502 log_action(iter, what2act[what].act[long_act], has_cg);
1503 what2act[what].print(s, iter->ent, has_cg);
1504 }
1505
1506 return trace_handle_return(s);
1507 }
1508
1509 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1510 int flags, struct trace_event *event)
1511 {
1512 return print_one_line(iter, false);
1513 }
1514
1515 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1516 {
1517 struct trace_seq *s = &iter->seq;
1518 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1519 const int offset = offsetof(struct blk_io_trace, sector);
1520 struct blk_io_trace old = {
1521 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1522 .time = iter->ts,
1523 };
1524
1525 trace_seq_putmem(s, &old, offset);
1526 trace_seq_putmem(s, &t->sector,
1527 sizeof(old) - offset + t->pdu_len);
1528 }
1529
1530 static enum print_line_t
1531 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1532 struct trace_event *event)
1533 {
1534 blk_trace_synthesize_old_trace(iter);
1535
1536 return trace_handle_return(&iter->seq);
1537 }
1538
1539 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1540 {
1541 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1542 return TRACE_TYPE_UNHANDLED;
1543
1544 return print_one_line(iter, true);
1545 }
1546
1547 static int
1548 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1549 {
1550 /* don't output context-info for blk_classic output */
1551 if (bit == TRACE_BLK_OPT_CLASSIC) {
1552 if (set)
1553 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1554 else
1555 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1556 }
1557 return 0;
1558 }
1559
1560 static struct tracer blk_tracer __read_mostly = {
1561 .name = "blk",
1562 .init = blk_tracer_init,
1563 .reset = blk_tracer_reset,
1564 .start = blk_tracer_start,
1565 .stop = blk_tracer_stop,
1566 .print_header = blk_tracer_print_header,
1567 .print_line = blk_tracer_print_line,
1568 .flags = &blk_tracer_flags,
1569 .set_flag = blk_tracer_set_flag,
1570 };
1571
1572 static struct trace_event_functions trace_blk_event_funcs = {
1573 .trace = blk_trace_event_print,
1574 .binary = blk_trace_event_print_binary,
1575 };
1576
1577 static struct trace_event trace_blk_event = {
1578 .type = TRACE_BLK,
1579 .funcs = &trace_blk_event_funcs,
1580 };
1581
1582 static int __init init_blk_tracer(void)
1583 {
1584 if (!register_trace_event(&trace_blk_event)) {
1585 pr_warn("Warning: could not register block events\n");
1586 return 1;
1587 }
1588
1589 if (register_tracer(&blk_tracer) != 0) {
1590 pr_warn("Warning: could not register the block tracer\n");
1591 unregister_trace_event(&trace_blk_event);
1592 return 1;
1593 }
1594
1595 return 0;
1596 }
1597
1598 device_initcall(init_blk_tracer);
1599
1600 static int blk_trace_remove_queue(struct request_queue *q)
1601 {
1602 struct blk_trace *bt;
1603
1604 bt = xchg(&q->blk_trace, NULL);
1605 if (bt == NULL)
1606 return -EINVAL;
1607
1608 put_probe_ref();
1609 blk_trace_free(bt);
1610 return 0;
1611 }
1612
1613 /*
1614 * Setup everything required to start tracing
1615 */
1616 static int blk_trace_setup_queue(struct request_queue *q,
1617 struct block_device *bdev)
1618 {
1619 struct blk_trace *bt = NULL;
1620 int ret = -ENOMEM;
1621
1622 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1623 if (!bt)
1624 return -ENOMEM;
1625
1626 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1627 if (!bt->msg_data)
1628 goto free_bt;
1629
1630 bt->dev = bdev->bd_dev;
1631 bt->act_mask = (u16)-1;
1632
1633 blk_trace_setup_lba(bt, bdev);
1634
1635 ret = -EBUSY;
1636 if (cmpxchg(&q->blk_trace, NULL, bt))
1637 goto free_bt;
1638
1639 get_probe_ref();
1640 return 0;
1641
1642 free_bt:
1643 blk_trace_free(bt);
1644 return ret;
1645 }
1646
1647 /*
1648 * sysfs interface to enable and configure tracing
1649 */
1650
1651 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1652 struct device_attribute *attr,
1653 char *buf);
1654 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1655 struct device_attribute *attr,
1656 const char *buf, size_t count);
1657 #define BLK_TRACE_DEVICE_ATTR(_name) \
1658 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1659 sysfs_blk_trace_attr_show, \
1660 sysfs_blk_trace_attr_store)
1661
1662 static BLK_TRACE_DEVICE_ATTR(enable);
1663 static BLK_TRACE_DEVICE_ATTR(act_mask);
1664 static BLK_TRACE_DEVICE_ATTR(pid);
1665 static BLK_TRACE_DEVICE_ATTR(start_lba);
1666 static BLK_TRACE_DEVICE_ATTR(end_lba);
1667
1668 static struct attribute *blk_trace_attrs[] = {
1669 &dev_attr_enable.attr,
1670 &dev_attr_act_mask.attr,
1671 &dev_attr_pid.attr,
1672 &dev_attr_start_lba.attr,
1673 &dev_attr_end_lba.attr,
1674 NULL
1675 };
1676
1677 struct attribute_group blk_trace_attr_group = {
1678 .name = "trace",
1679 .attrs = blk_trace_attrs,
1680 };
1681
1682 static const struct {
1683 int mask;
1684 const char *str;
1685 } mask_maps[] = {
1686 { BLK_TC_READ, "read" },
1687 { BLK_TC_WRITE, "write" },
1688 { BLK_TC_FLUSH, "flush" },
1689 { BLK_TC_SYNC, "sync" },
1690 { BLK_TC_QUEUE, "queue" },
1691 { BLK_TC_REQUEUE, "requeue" },
1692 { BLK_TC_ISSUE, "issue" },
1693 { BLK_TC_COMPLETE, "complete" },
1694 { BLK_TC_FS, "fs" },
1695 { BLK_TC_PC, "pc" },
1696 { BLK_TC_NOTIFY, "notify" },
1697 { BLK_TC_AHEAD, "ahead" },
1698 { BLK_TC_META, "meta" },
1699 { BLK_TC_DISCARD, "discard" },
1700 { BLK_TC_DRV_DATA, "drv_data" },
1701 { BLK_TC_FUA, "fua" },
1702 };
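/*
 * These strings form the act_mask sysfs vocabulary, e.g. (sketch,
 * assuming a whole-disk device named sda):
 *
 *	echo "read,write,complete" > /sys/block/sda/trace/act_mask
 *
 * blk_trace_str2mask() parses such a list back into BLK_TC_* bits; a
 * numeric mask is accepted too, see sysfs_blk_trace_attr_store().
 */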
1703
1704 static int blk_trace_str2mask(const char *str)
1705 {
1706 int i;
1707 int mask = 0;
1708 char *buf, *s, *token;
1709
1710 buf = kstrdup(str, GFP_KERNEL);
1711 if (buf == NULL)
1712 return -ENOMEM;
1713 s = strstrip(buf);
1714
1715 while (1) {
1716 token = strsep(&s, ",");
1717 if (token == NULL)
1718 break;
1719
1720 if (*token == '\0')
1721 continue;
1722
1723 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1724 if (strcasecmp(token, mask_maps[i].str) == 0) {
1725 mask |= mask_maps[i].mask;
1726 break;
1727 }
1728 }
1729 if (i == ARRAY_SIZE(mask_maps)) {
1730 mask = -EINVAL;
1731 break;
1732 }
1733 }
1734 kfree(buf);
1735
1736 return mask;
1737 }
1738
1739 static ssize_t blk_trace_mask2str(char *buf, int mask)
1740 {
1741 int i;
1742 char *p = buf;
1743
1744 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1745 if (mask & mask_maps[i].mask) {
1746 p += sprintf(p, "%s%s",
1747 (p == buf) ? "" : ",", mask_maps[i].str);
1748 }
1749 }
1750 *p++ = '\n';
1751
1752 return p - buf;
1753 }
1754
1755 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1756 {
1757 if (bdev->bd_disk == NULL)
1758 return NULL;
1759
1760 return bdev_get_queue(bdev);
1761 }
1762
1763 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1764 struct device_attribute *attr,
1765 char *buf)
1766 {
1767 struct hd_struct *p = dev_to_part(dev);
1768 struct request_queue *q;
1769 struct block_device *bdev;
1770 ssize_t ret = -ENXIO;
1771
1772 bdev = bdget(part_devt(p));
1773 if (bdev == NULL)
1774 goto out;
1775
1776 q = blk_trace_get_queue(bdev);
1777 if (q == NULL)
1778 goto out_bdput;
1779
1780 mutex_lock(&q->blk_trace_mutex);
1781
1782 if (attr == &dev_attr_enable) {
1783 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1784 goto out_unlock_bdev;
1785 }
1786
1787 if (q->blk_trace == NULL)
1788 ret = sprintf(buf, "disabled\n");
1789 else if (attr == &dev_attr_act_mask)
1790 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1791 else if (attr == &dev_attr_pid)
1792 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1793 else if (attr == &dev_attr_start_lba)
1794 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1795 else if (attr == &dev_attr_end_lba)
1796 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1797
1798 out_unlock_bdev:
1799 mutex_unlock(&q->blk_trace_mutex);
1800 out_bdput:
1801 bdput(bdev);
1802 out:
1803 return ret;
1804 }
1805
1806 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1807 struct device_attribute *attr,
1808 const char *buf, size_t count)
1809 {
1810 struct block_device *bdev;
1811 struct request_queue *q;
1812 struct hd_struct *p;
1813 u64 value;
1814 ssize_t ret = -EINVAL;
1815
1816 if (count == 0)
1817 goto out;
1818
1819 if (attr == &dev_attr_act_mask) {
1820 if (kstrtoull(buf, 0, &value)) {
1821 /* Assume it is a list of trace category names */
1822 ret = blk_trace_str2mask(buf);
1823 if (ret < 0)
1824 goto out;
1825 value = ret;
1826 }
1827 } else if (kstrtoull(buf, 0, &value))
1828 goto out;
1829
1830 ret = -ENXIO;
1831
1832 p = dev_to_part(dev);
1833 bdev = bdget(part_devt(p));
1834 if (bdev == NULL)
1835 goto out;
1836
1837 q = blk_trace_get_queue(bdev);
1838 if (q == NULL)
1839 goto out_bdput;
1840
1841 mutex_lock(&q->blk_trace_mutex);
1842
1843 if (attr == &dev_attr_enable) {
1844 if (value)
1845 ret = blk_trace_setup_queue(q, bdev);
1846 else
1847 ret = blk_trace_remove_queue(q);
1848 goto out_unlock_bdev;
1849 }
1850
1851 ret = 0;
1852 if (q->blk_trace == NULL)
1853 ret = blk_trace_setup_queue(q, bdev);
1854
1855 if (ret == 0) {
1856 if (attr == &dev_attr_act_mask)
1857 q->blk_trace->act_mask = value;
1858 else if (attr == &dev_attr_pid)
1859 q->blk_trace->pid = value;
1860 else if (attr == &dev_attr_start_lba)
1861 q->blk_trace->start_lba = value;
1862 else if (attr == &dev_attr_end_lba)
1863 q->blk_trace->end_lba = value;
1864 }
1865
1866 out_unlock_bdev:
1867 mutex_unlock(&q->blk_trace_mutex);
1868 out_bdput:
1869 bdput(bdev);
1870 out:
1871 return ret ? ret : count;
1872 }
1873
1874 int blk_trace_init_sysfs(struct device *dev)
1875 {
1876 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1877 }
1878
1879 void blk_trace_remove_sysfs(struct device *dev)
1880 {
1881 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1882 }
1883
1884 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1885
1886 #ifdef CONFIG_EVENT_TRACING
1887
1888 void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1889 {
1890 int i = 0;
1891
1892 if (op & REQ_PREFLUSH)
1893 rwbs[i++] = 'F';
1894
1895 switch (op & REQ_OP_MASK) {
1896 case REQ_OP_WRITE:
1897 case REQ_OP_WRITE_SAME:
1898 rwbs[i++] = 'W';
1899 break;
1900 case REQ_OP_DISCARD:
1901 rwbs[i++] = 'D';
1902 break;
1903 case REQ_OP_SECURE_ERASE:
1904 rwbs[i++] = 'D';
1905 rwbs[i++] = 'E';
1906 break;
1907 case REQ_OP_FLUSH:
1908 rwbs[i++] = 'F';
1909 break;
1910 case REQ_OP_READ:
1911 rwbs[i++] = 'R';
1912 break;
1913 default:
1914 rwbs[i++] = 'N';
1915 }
1916
1917 if (op & REQ_FUA)
1918 rwbs[i++] = 'F';
1919 if (op & REQ_RAHEAD)
1920 rwbs[i++] = 'A';
1921 if (op & REQ_SYNC)
1922 rwbs[i++] = 'S';
1923 if (op & REQ_META)
1924 rwbs[i++] = 'M';
1925
1926 rwbs[i] = '\0';
1927 }
1928 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
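/*
 * Example for the helper above:
 *
 *	char rwbs[RWBS_LEN];
 *	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096);
 *
 * fills rwbs with "WFS", mirroring what fill_rwbs() produces for the
 * equivalent BLK_TC_* bits in the blk tracer output.
 */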
1929
1930 #endif /* CONFIG_EVENT_TRACING */
1931