// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping operations across multiple
 * transactions to comply with AG locking order rules, and to be able
 * to spread a single refcount update operation (an operation on an
 * n-block extent can update as many as n records!) among multiple
 * transactions.  XFS can roll a transaction to facilitate this, but
 * using this facility requires us to log "intent" items in case log
 * recovery needs to redo the operation, and to log "done" items to
 * indicate that redo is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item where it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * recur.
 */
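
/*
 * A rough sketch of the caller-side lifecycle, for illustration only.
 * It assumes the caller has already allocated a transaction with a
 * permanent log reservation; "tp" and "ip" stand in for the caller's
 * transaction and inode:
 *
 *	struct xfs_defer_ops	dfops;
 *	int			error;
 *
 *	xfs_defer_init(tp, &dfops);
 *	... make metadata updates, calling xfs_defer_add() to queue
 *	    work that must happen in follow-up transactions ...
 *	error = xfs_defer_finish(&tp, &dfops);
 *	if (error) {
 *		xfs_defer_cancel(&dfops);
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 */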

static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

/* Abort all logged intents that don't yet have a done item, then shut down. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_ops		*dop = (*tp)->t_dfops;
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	/* Hold the (previously bjoin'd) buffers locked across the roll. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	dop = (*tp)->t_dfops;
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
	}

	return error;
}

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op.  Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Add this buffer to the deferred op.  Each joined buffer is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_bjoin(
	struct xfs_defer_ops		*dop,
	struct xfs_buf			*bp)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
		if (dop->dop_bufs[i] == bp)
			return 0;
		else if (dop->dop_bufs[i] == NULL) {
			dop->dop_bufs[i] = bp;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}
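
/*
 * Illustrative sketch (not code from this file): a caller that must
 * keep an inode and a buffer locked across the transaction rolls in
 * xfs_defer_finish() would join them to both the transaction and the
 * dfops, roughly:
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_defer_ijoin(tp->t_dfops, ip);
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_defer_bjoin(tp->t_dfops, bp);
 *
 * Both helpers return -EFSCORRUPTED if their fixed-size arrays are
 * already full; otherwise xfs_defer_trans_roll() relogs and rejoins
 * the inode and buffer on every roll.
 */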

/*
 * Reset an already used dfops after finish.
 */
static void
xfs_defer_reset(
	struct xfs_defer_ops		*dop)
{
	ASSERT(!xfs_defer_has_unfinished_work(dop));
	dop->dop_low = false;
	memset(dop->dop_inodes, 0, sizeof(dop->dop_inodes));
	memset(dop->dop_bufs, 0, sizeof(dop->dop_bufs));
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Any joined inodes and buffers are relogged to the new transaction.
 */
int
xfs_defer_finish(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT((*tp)->t_dfops == dop);

	trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/*
		 * Roll the transaction and update dop in case dfops was
		 * embedded in the transaction.
		 */
		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out;
		dop = (*tp)->t_dfops;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists and items.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction.  See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

	/*
	 * Roll the transaction once more to avoid returning to the caller
	 * with a dirty transaction.
	 */
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		dop = (*tp)->t_dfops;
	}
out:
	if (error) {
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	} else {
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);
		xfs_defer_reset(dop);
	}

	return error;
}
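
/*
 * Sketch of the ->finish_item contract described at the top of this
 * file.  This is a hypothetical op type, for illustration only; the
 * signature matches the finish_item call above:
 *
 *	STATIC int
 *	xfs_foo_finish_item(
 *		struct xfs_trans	*tp,
 *		struct xfs_defer_ops	*dop,
 *		struct list_head	*item,
 *		void			*done_item,
 *		void			**state)
 *	{
 *		... do as much of the item's work as the remaining
 *		    transaction reservation safely allows ...
 *		if (work remains) {
 *			... update the work item to cover only the
 *			    unfinished part and reset done_item's
 *			    count to the number finished ...
 *			return -EAGAIN;
 *		}
 *		return 0;
 *	}
 *
 * xfs_defer_finish() responds to -EAGAIN by requeueing the item,
 * logging a fresh intent for the remaining work, and rolling the
 * transaction before calling ->finish_item again.
 */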

/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop, _RET_IP_);

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}
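
/*
 * Illustrative example (assumes the extent-free work item type of this
 * era of the code; struct and field names come from xfs_bmap.h):
 * deferring a block free looks roughly like this:
 *
 *	struct xfs_extent_free_item	*free;
 *
 *	free = kmem_alloc(sizeof(*free), KM_SLEEP | KM_NOFS);
 *	free->xefi_startblock = bno;
 *	free->xefi_blockcount = len;
 *	xfs_defer_add(dop, XFS_DEFER_OPS_TYPE_FREE, &free->xefi_list);
 */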

/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}
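
/*
 * For example, each op type registers itself once at initialization,
 * along the lines of (these defer type symbols are defined elsewhere):
 *
 *	xfs_defer_init_op_type(&xfs_extent_free_defer_type);
 *	xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
 *	xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
 */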

/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_mount		*mp = NULL;

	memset(dop, 0, sizeof(struct xfs_defer_ops));
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	if (tp) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		tp->t_dfops = dop;
		mp = tp->t_mountp;
	}
	trace_xfs_defer_init(mp, dop, _RET_IP_);
}
557 }