// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
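	/* a reference must already be held; elevating from zero is a bug */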
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock. If it's not pointing to @icq now, it
	 * never will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
89 | ||
90 | /* | |
91 | * Slow path for ioc release in put_io_context(). Performs double-lock | |
92 | * dancing to unlink all icq's and then frees ioc. | |
93 | */ | |
94 | static void ioc_release_fn(struct work_struct *work) | |
95 | { | |
96 | struct io_context *ioc = container_of(work, struct io_context, | |
97 | release_work); | |
98 | unsigned long flags; | |
99 | ||
	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which will trigger a lockdep warning. The ioc's are
	 * guaranteed to be different, so use a different locking subclass
	 * here. Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
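			/*
			 * @q's lock is contended; drop the ioc lock and
			 * retry rather than deadlocking against the usual
			 * queue-then-ioc locking order.
			 */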
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing an ioc requires reverse-order double locking and we
	 * may already be holding a queue_lock. Do it asynchronously from
	 * a workqueue.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If the active reference reaches zero
 * after the put, @ioc can never issue further IOs and the ioscheds are
 * notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
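	/*
	 * GFP_ATOMIC: icq insertion happens under the queue and ioc locks;
	 * ioc_create_icq() preloads the tree before taking them.
	 */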
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as the
	 * exit path may issue IOs from e.g. exit_files(). The exit path
	 * is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock(); for %current it's
 * better to use %current->io_context + get_io_context() instead.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists. If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
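		/* lost the race to another creator; use the icq it linked */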
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
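	/* SLAB_PANIC: panic at boot if this core cache can't be created */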
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);