/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

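/*
 * Per-queue bookkeeping: the list of registered stat callbacks (walked
 * under RCU on the completion path, modified under @lock), plus a flag
 * that keeps QUEUE_FLAG_STATS set even when no callbacks are registered.
 */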
struct blk_queue_stats {
        struct list_head callbacks;
        spinlock_t lock;
        bool enable_accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
        stat->min = -1ULL;
        stat->max = stat->nr_samples = stat->mean = 0;
        stat->batch = 0;
}

/*
 * src is a per-cpu stat whose ->mean is never computed: blk_rq_stat_add()
 * only accumulates samples in ->batch. Fold that batch into dst's running
 * mean, e.g. dst (mean 100 over 3 samples) merged with src (batch 400 over
 * 2 samples) gives (400 + 100 * 3) / (3 + 2) = 140.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        if (!src->nr_samples)
                return;

        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);

        dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
                                dst->nr_samples + src->nr_samples);

        dst->nr_samples += src->nr_samples;
}

void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
        stat->min = min(stat->min, value);
        stat->max = max(stat->max, value);
        stat->batch += value;
        stat->nr_samples++;
}

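/*
 * Called from the request completion path with the completion timestamp
 * @now. Computes how long the request spent on the device (clamped to 0 if
 * the clock appears to have gone backwards), feeds it to the throttling
 * code, then records it in the matching per-cpu bucket of every active
 * callback. The callback list is walked under RCU, so this never blocks.
 */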
void blk_stat_add(struct request *rq, u64 now)
{
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
        int bucket;
        u64 value;

        value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

        blk_throtl_stat_add(rq, value);

        rcu_read_lock();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (!blk_stat_is_active(cb))
                        continue;

                bucket = cb->bucket_fn(rq);
                if (bucket < 0)
                        continue;

                stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
                blk_rq_stat_add(stat, value);
                put_cpu_ptr(cb->cpu_stat);
        }
        rcu_read_unlock();
}

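/*
 * Timer handler: one sampling window has elapsed. Reset the aggregate
 * buckets, fold every online CPU's per-cpu buckets into them (zeroing the
 * per-cpu state for the next window), then hand the aggregated result to
 * the consumer's timer_fn.
 */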
static void blk_stat_timer_fn(struct timer_list *t)
{
        struct blk_stat_callback *cb = from_timer(cb, t, timer);
        unsigned int bucket;
        int cpu;

        for (bucket = 0; bucket < cb->buckets; bucket++)
                blk_rq_stat_init(&cb->stat[bucket]);

        for_each_online_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++) {
                        blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
                        blk_rq_stat_init(&cpu_stat[bucket]);
                }
        }

        cb->timer_fn(cb);
}

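/*
 * Typical lifecycle for a consumer of this API (illustrative sketch, not
 * code from this file; my_timer_fn, my_bucket_fn, nbuckets, data and
 * window_ms are placeholder names):
 *
 *      cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, nbuckets, data);
 *      blk_stat_add_callback(q, cb);
 *      blk_stat_activate_msecs(cb, window_ms);   (helper in blk-stat.h arms the timer)
 *      ... my_timer_fn() fires after the window and may re-arm ...
 *      blk_stat_remove_callback(q, cb);
 *      blk_stat_free_callback(cb);
 */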
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
                        int (*bucket_fn)(const struct request *),
                        unsigned int buckets, void *data)
{
        struct blk_stat_callback *cb;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
                                 GFP_KERNEL);
        if (!cb->stat) {
                kfree(cb);
                return NULL;
        }
        cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
                                      __alignof__(struct blk_rq_stat));
        if (!cb->cpu_stat) {
                kfree(cb->stat);
                kfree(cb);
                return NULL;
        }

        cb->timer_fn = timer_fn;
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
        timer_setup(&cb->timer, blk_stat_timer_fn, 0);

        return cb;
}

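/*
 * Register @cb on @q. The per-cpu buckets are cleared for every possible
 * CPU, not just online ones, so a CPU that comes online later starts from a
 * properly initialized state. The callback is then published on the
 * RCU-protected list and request stats are switched on for the queue. The
 * callback's timer is not armed here; the consumer does that once it wants
 * a sampling window.
 */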
void blk_stat_add_callback(struct request_queue *q,
                           struct blk_stat_callback *cb)
{
        unsigned int bucket;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++)
                        blk_rq_stat_init(&cpu_stat[bucket]);
        }

        spin_lock(&q->stats->lock);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
}

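/*
 * Unregister @cb from @q. The callback is unlinked under the stats lock,
 * QUEUE_FLAG_STATS is cleared if nothing else needs accounting, and
 * del_timer_sync() waits for a running blk_stat_timer_fn() to finish.
 * Completion-path readers may still be walking the RCU list, which is why
 * the actual freeing in blk_stat_free_callback() is deferred via RCU.
 */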
void blk_stat_remove_callback(struct request_queue *q,
                              struct blk_stat_callback *cb)
{
        spin_lock(&q->stats->lock);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
                blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);

        del_timer_sync(&cb->timer);
}

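/*
 * Freeing is deferred through call_rcu() so that blk_stat_add() readers
 * traversing the callback list without the lock can never touch memory
 * that has already been released.
 */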
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
        struct blk_stat_callback *cb;

        cb = container_of(head, struct blk_stat_callback, rcu);
        free_percpu(cb->cpu_stat);
        kfree(cb->stat);
        kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
        if (cb)
                call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

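/*
 * Force request statistics on for @q and keep QUEUE_FLAG_STATS set even
 * after the last callback is removed. Exported so that modular users can
 * call it.
 */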
void blk_stat_enable_accounting(struct request_queue *q)
{
        spin_lock(&q->stats->lock);
        q->stats->enable_accounting = true;
        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
        struct blk_queue_stats *stats;

        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return NULL;

        INIT_LIST_HEAD(&stats->callbacks);
        spin_lock_init(&stats->lock);
        stats->enable_accounting = false;

        return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
        if (!stats)
                return;

        WARN_ON(!list_empty(&stats->callbacks));

        kfree(stats);
}