// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

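/* Reset a stat bucket; min starts at -1ULL (U64_MAX) so min() updates take. */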
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

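	/*
	 * src->batch holds the raw sum of src's samples, so the combined
	 * mean is (sum(src) + mean(dst) * nr(dst)) / (nr(dst) + nr(src)).
	 */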
	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

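/* Record one sample; the mean is computed lazily from ->batch at sum time. */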
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

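/*
 * Completion path: compute the request's I/O time and feed it to the
 * matching per-cpu bucket of every active callback on the queue.
 */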
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

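/*
 * Runs when a callback's window timer expires: fold the per-cpu buckets
 * into cb->stat, reset them, and hand the aggregate to the owner.
 */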
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

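/*
 * Allocate a callback with @buckets buckets. bucket_fn maps a request to a
 * bucket index; a negative return skips the request. Freed with
 * blk_stat_free_callback().
 */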
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

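/*
 * Publish a callback on @q. The per-cpu buckets are reset first so stale
 * samples from a previous registration can't leak into the new window.
 */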
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

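/*
 * Unpublish a callback; QUEUE_FLAG_STATS stays set if accounting has been
 * enabled permanently. The timer is quiesced before we return.
 */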
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

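/* Defer the free past an RCU grace period so blk_stat_add() readers are safe. */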
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

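/* Keep QUEUE_FLAG_STATS set for the queue's lifetime, even with no callbacks. */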
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

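/* One blk_queue_stats is allocated per request_queue at queue creation. */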
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

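/* All callbacks must have been removed by now; the WARN_ON catches leaks. */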
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}
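
/*
 * Usage sketch (not part of this file): a consumer such as blk-wbt or the
 * blk-mq poll-stats code registers a callback and arms a collection window.
 * blk_stat_activate_msecs() is a helper declared in blk-stat.h;
 * my_timer_fn(), my_bucket_fn(), nbuckets, and data are placeholder names.
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, nbuckets, data);
 *	if (!cb)
 *		return -ENOMEM;
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);	// collect for ~100 ms, then my_timer_fn() runs
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */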