// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
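
/*
 * Each queue_sysfs_entry backs one attribute file under
 * /sys/block/<disk>/queue/.  ->show() formats the current value into the
 * PAGE_SIZE buffer @page and ->store() parses the user's input, returning
 * the number of bytes consumed or a negative errno.  From userspace the
 * files are plain text, e.g. (device name is only an example):
 *
 *      cat /sys/block/sda/queue/nr_requests
 *      echo 256 > /sys/block/sda/queue/nr_requests
 */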

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}
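
/*
 * Most queue limits are kept internally in 512-byte sectors or in pages,
 * while the sysfs files use kilobytes or bytes.  The conversions used
 * throughout this file are:
 *
 *      sectors  -> KiB:   x >> 1        (two 512-byte sectors per KiB)
 *      sectors  -> bytes: x << 9
 *      pages   <-> KiB:   shift by (PAGE_SHIFT - 10)
 *
 * e.g. a max_sectors limit of 2560 sectors is reported as 1280 in
 * max_sectors_kb.
 */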

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long var;
        unsigned int max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&var, page, count);

        if (ret < 0)
                return ret;

        max_sectors_kb = (unsigned int)var;
        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
                                         q->limits.max_dev_sectors >> 1);
        if (max_sectors_kb == 0) {
                q->limits.max_user_sectors = 0;
                max_sectors_kb = min(max_hw_sectors_kb,
                                     BLK_DEF_MAX_SECTORS >> 1);
        } else {
                if (max_sectors_kb > max_hw_sectors_kb ||
                    max_sectors_kb < page_kb)
                        return -EINVAL;
                q->limits.max_user_sectors = max_sectors_kb << 1;
        }

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}
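
/*
 * max_sectors_kb caps the size of normal filesystem I/O.  It can be lowered
 * from userspace but never raised above max_hw_sectors_kb, and writing 0
 * resets it to the default.  e.g. (device name is only an example):
 *
 *      echo 512 > /sys/block/sda/queue/max_sectors_kb
 */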

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \
{ \
        int bit; \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
        return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{ \
        unsigned long val; \
        ssize_t ret; \
        ret = queue_var_store(&val, page, count); \
        if (ret < 0) \
                return ret; \
        if (neg) \
                val = !val; \
 \
        if (val) \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
        else \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
        return ret; \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
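
/*
 * The "neg" argument inverts the flag for sysfs, so e.g.
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) roughly expands to a pair of
 * helpers in which "rotational" == 1 in sysfs means QUEUE_FLAG_NONROT is
 * *clear* on the queue:
 *
 *      static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *      {
 *              int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *
 *              return queue_var_show(!bit, page);
 *      }
 */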

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}
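
/*
 * Both nomerges and rq_affinity encode a small enum as an integer.
 * nomerges: 0 = merging enabled, 1 = only simple one-hit-cache merges,
 * 2 = no merging at all.  rq_affinity: 0 = off, 1 = complete on a CPU in
 * the same group as the submitter, 2 = force completion on the submitting
 * CPU.  e.g. (device name is only an example):
 *
 *      echo 2 > /sys/block/sda/queue/rq_affinity
 */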

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        if (!strncmp(page, "write back", 10)) {
                if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
                        return -EINVAL;
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        } else if (!strncmp(page, "write through", 13) ||
                   !strncmp(page, "none", 4)) {
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        } else {
                return -EINVAL;
        }

        return count;
}
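
/*
 * write_cache accepts the literal strings "write back" and "write through"
 * (plus "none" as an alias for write through), and "write back" is rejected
 * unless the device actually reported a volatile cache (QUEUE_FLAG_HW_WC).
 * e.g. (device name is only an example):
 *
 *      echo "write through" > /sys/block/sda/queue/write_cache
 */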

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name) \
static struct queue_sysfs_entry _prefix##_entry = { \
        .attr = { .name = _name, .mode = 0444 }, \
        .show = _prefix##_show, \
};

#define QUEUE_RW_ENTRY(_prefix, _name) \
static struct queue_sysfs_entry _prefix##_entry = { \
        .attr = { .name = _name, .mode = 0644 }, \
        .show = _prefix##_show, \
        .store = _prefix##_store, \
};
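
/*
 * For example, QUEUE_RW_ENTRY(queue_requests, "nr_requests") below roughly
 * expands to:
 *
 *      static struct queue_sysfs_entry queue_requests_entry = {
 *              .attr = { .name = "nr_requests", .mode = 0644 },
 *              .show = queue_requests_show,
 *              .store = queue_requests_store,
 *      };
 *
 * i.e. a world-readable, root-writable "nr_requests" file wired to the
 * show/store helpers defined above.
 */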

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        if (wbt_disabled(q))
                return sprintf(page, "0\n");

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q->disk);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
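
/*
 * wbt_lat_usec is exposed in microseconds but stored in nanoseconds, hence
 * the * 1000 / div_u64(..., 1000) above.  Writing -1 restores the
 * device-type default, 0 disables writeback throttling, and any positive
 * value sets the target minimum read latency.  e.g. (device name is only
 * an example):
 *
 *      echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */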

static struct attribute *queue_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        &queue_dma_alignment_entry.attr,
        NULL,
};

static struct attribute *blk_mq_queue_attrs[] = {
        &queue_requests_entry.attr,
        &elv_iosched_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
        &queue_wb_lat_entry.attr,
#endif
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
                                         struct attribute *attr, int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if (!queue_is_mq(q))
                return 0;

        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
        .attrs = blk_mq_queue_attrs,
        .is_visible = blk_mq_queue_attr_visible,
};
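
/*
 * The ->is_visible() callbacks let one static attribute list serve every
 * disk: an attribute returns 0 (hidden) when it does not apply, e.g. the
 * zone limits on a non-zoned device or the whole blk_mq_queue_attrs group
 * on a bio-based queue, so those files simply never appear in sysfs for
 * such disks.
 */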

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
        &queue_attr_group,
        &blk_mq_queue_attr_group,
        NULL
};

static void blk_queue_release(struct kobject *kobj)
{
        /* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
        .default_groups = blk_queue_attr_groups,
        .sysfs_ops = &queue_sysfs_ops,
        .release = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        int ret;

        mutex_lock(&q->sysfs_dir_lock);
        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
        if (ret < 0)
                goto out_put_queue_kobj;

        if (queue_is_mq(q)) {
                ret = blk_mq_sysfs_register(disk);
                if (ret)
                        goto out_put_queue_kobj;
        }
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk);
        if (ret)
                goto out_debugfs_remove;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto out_unregister_ia_ranges;
        }

        ret = blk_crypto_sysfs_register(disk);
        if (ret)
                goto out_elv_unregister;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(disk);
        blk_throtl_register(disk);

        /* Now everything is ready and send out KOBJ_ADD uevent */
        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices. Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved. To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

out_elv_unregister:
        elv_unregister_queue(q);
out_unregister_ia_ranges:
        disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
        blk_debugfs_remove(disk);
        mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
        kobject_put(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);
        blk_crypto_sysfs_unregister(disk);

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
        kobject_del(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);

        blk_debugfs_remove(disk);
}