// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
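
/*
 * Illustrative sketch only (not built): a typical caller pattern for
 * the facility described above, using the xfs_defer_init,
 * xfs_defer_add, xfs_defer_ijoin, xfs_defer_finish, and
 * xfs_defer_cancel entry points defined in this file.  The work item
 * shown is a hypothetical extent-free deferral; XFS_DEFER_OPS_TYPE_FREE
 * and the xefi_list field come from the libxfs headers, but the
 * surrounding function is invented for illustration.
 */
#if 0
STATIC int
xfs_defer_example_caller(
	struct xfs_trans		**tpp,
	struct xfs_inode		*ip,
	struct xfs_extent_free_item	*xefi)
{
	struct xfs_defer_ops		dfops;
	int				error;

	/* Set up the intake/pending lists and attach them to *tpp. */
	xfs_defer_init(*tpp, &dfops);

	/* Queue a work item; its intent is not logged until we finish. */
	xfs_defer_add(&dfops, XFS_DEFER_OPS_TYPE_FREE, &xefi->xefi_list);

	/* Keep the inode joined (and relogged) across every roll. */
	error = xfs_defer_ijoin(&dfops, ip);
	if (error)
		goto out_cancel;

	/* Log intents, roll the transaction, and finish all the work. */
	error = xfs_defer_finish(tpp, &dfops);
	if (error)
		goto out_cancel;
	return 0;

out_cancel:
	xfs_defer_cancel(&dfops);
	return error;
}
#endif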

static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	/* Hold the (previously bjoin'd) buffer locked across the roll. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}
	dop->dop_committed = true;

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
	}

	return error;
}

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op.  Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Add this buffer to the deferred op.  Each joined buffer is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_bjoin(
	struct xfs_defer_ops		*dop,
	struct xfs_buf			*bp)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
		if (dop->dop_bufs[i] == bp)
			return 0;
		else if (dop->dop_bufs[i] == NULL) {
			dop->dop_bufs[i] = bp;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Any inodes or buffers joined to this deferred op are relogged to the
 * new transaction on every roll.
 */
int
xfs_defer_finish(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);
	struct xfs_defer_ops		*orig_dop;

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);

	/*
	 * Attach dfops to the transaction during deferred ops processing. This
	 * explicitly causes calls into the allocator to defer AGFL block frees.
	 * Note that this code can go away once all dfops users attach to the
	 * associated tp.
	 */
	ASSERT(!(*tp)->t_dfops || ((*tp)->t_dfops == dop));
	orig_dop = (*tp)->t_dfops;
	(*tp)->t_dfops = dop;

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/* Roll the transaction. */
		error = xfs_defer_trans_roll(tp, dop);
		if (error)
			goto out;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists and stuff.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction.  See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

	/*
	 * Roll the transaction once more to avoid returning to the caller
	 * with a dirty transaction.
	 */
	if ((*tp)->t_flags & XFS_TRANS_DIRTY)
		error = xfs_defer_trans_roll(tp, dop);
out:
	(*tp)->t_dfops = orig_dop;
	if (error)
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	else
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);
	return error;
}
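
/*
 * Illustrative sketch only (not built): the shape of a ->finish_item
 * callback that requests a continuation, per "Requesting a Fresh
 * Transaction while Finishing Deferred Work" above.  The signature
 * mirrors the finish_item call in xfs_defer_finish; every
 * xfs_example_* symbol is hypothetical.
 */
#if 0
STATIC int
xfs_example_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	int				error;

	/* Do as much of this item's work as the reservation allows. */
	error = xfs_example_do_work(tp, item, done_item, state);
	if (error == -EAGAIN) {
		/*
		 * Out of reservation.  Per the contract above, the
		 * helper has already trimmed the work item to the
		 * unfinished remainder and reset the done item's count
		 * to what was completed, so pass -EAGAIN up and
		 * xfs_defer_finish will log a replacement intent and
		 * roll the transaction.
		 */
		return -EAGAIN;
	}
	return error;
}
#endif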

/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop, _RET_IP_);

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}
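
/*
 * Illustrative sketch only (not built): how an op type plugs into the
 * dispatch table via xfs_defer_init_op_type.  Only the callbacks this
 * file invokes are shown; their signatures are inferred from the call
 * sites above, and every xfs_example_* symbol (and the max_items
 * value) is hypothetical.
 */
#if 0
static const struct xfs_defer_op_type xfs_example_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_FREE,
	.max_items	= 16,	/* cap work items per logged intent */
	.diff_items	= xfs_example_diff_items,	/* AG-order sort */
	.create_intent	= xfs_example_create_intent,
	.abort_intent	= xfs_example_abort_intent,
	.log_item	= xfs_example_log_item,
	.create_done	= xfs_example_create_done,
	.finish_item	= xfs_example_finish_item,
	.finish_cleanup	= xfs_example_finish_cleanup,
	.cancel_item	= xfs_example_cancel_item,
};

/* Called once at init time, like the real op types. */
STATIC void
xfs_example_register(void)
{
	xfs_defer_init_op_type(&xfs_example_defer_type);
}
#endif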

/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_mount		*mp = NULL;

	memset(dop, 0, sizeof(struct xfs_defer_ops));
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	if (tp) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		tp->t_dfops = dop;
		mp = tp->t_mountp;
	}
	trace_xfs_defer_init(mp, dop, _RET_IP_);
}