libxfs/defer_item.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_inode.h"

/*
 * Dummy defer item ops, since we don't do logging: userspace has no log to
 * record intents in, so the create/log/done hooks below are all no-ops.
 */

/* Extent Freeing */

/* Sort extent free items by AG. */
static int
xfs_extent_free_diff_items(
	void *priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_mount *mp = priv;
	struct xfs_extent_free_item *ra;
	struct xfs_extent_free_item *rb;

	ra = container_of(a, struct xfs_extent_free_item, xefi_list);
	rb = container_of(b, struct xfs_extent_free_item, xefi_list);
	return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
}

/* Get an EFI. */
STATIC void *
xfs_extent_free_create_intent(
	struct xfs_trans *tp,
	unsigned int count)
{
	return NULL;
}

/* Log a free extent to the intent item. */
STATIC void
xfs_extent_free_log_item(
	struct xfs_trans *tp,
	void *intent,
	struct list_head *item)
{
}

/* Get an EFD so we can process all the free extents. */
STATIC void *
xfs_extent_free_create_done(
	struct xfs_trans *tp,
	void *intent,
	unsigned int count)
{
	return NULL;
}

/* Process a free extent. */
STATIC int
xfs_extent_free_finish_item(
	struct xfs_trans *tp,
	struct list_head *item,
	void *done_item,
	void **state)
{
	struct xfs_extent_free_item *free;
	int error;

	free = container_of(item, struct xfs_extent_free_item, xefi_list);
	error = xfs_free_extent(tp, free->xefi_startblock,
			free->xefi_blockcount, &free->xefi_oinfo,
			XFS_AG_RESV_NONE);
	kmem_free(free);
	return error;
}

/* Abort all pending EFIs. */
STATIC void
xfs_extent_free_abort_intent(
	void *intent)
{
}

/* Cancel a free extent. */
STATIC void
xfs_extent_free_cancel_item(
	struct list_head *item)
{
	struct xfs_extent_free_item *free;

	free = container_of(item, struct xfs_extent_free_item, xefi_list);
	kmem_free(free);
}

const struct xfs_defer_op_type xfs_extent_free_defer_type = {
	.diff_items = xfs_extent_free_diff_items,
	.create_intent = xfs_extent_free_create_intent,
	.abort_intent = xfs_extent_free_abort_intent,
	.log_item = xfs_extent_free_log_item,
	.create_done = xfs_extent_free_create_done,
	.finish_item = xfs_extent_free_finish_item,
	.cancel_item = xfs_extent_free_cancel_item,
};
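
/*
 * Illustrative sketch, not part of the upstream file: one way a caller could
 * queue a deferred extent free that the ops above later process.  The helper
 * name is hypothetical; it assumes the xfs_defer_add(tp, type, list)
 * signature and the kmem_alloc()/KM_SLEEP allocation used elsewhere in
 * libxfs.
 */
STATIC void
example_defer_extent_free(
	struct xfs_trans *tp,
	xfs_fsblock_t bno,
	xfs_extlen_t len,
	const struct xfs_owner_info *oinfo)
{
	struct xfs_extent_free_item *free;

	free = kmem_alloc(sizeof(struct xfs_extent_free_item), KM_SLEEP);
	free->xefi_startblock = bno;
	free->xefi_blockcount = len;
	free->xefi_oinfo = *oinfo;

	/* Queue the item; xfs_extent_free_finish_item() frees it later. */
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &free->xefi_list);
}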

/*
 * AGFL blocks are accounted differently in the reserve pools and are not
 * inserted into the busy extent list.
 */
STATIC int
xfs_agfl_free_finish_item(
	struct xfs_trans *tp,
	struct list_head *item,
	void *done_item,
	void **state)
{
	struct xfs_mount *mp = tp->t_mountp;
	struct xfs_extent_free_item *free;
	struct xfs_buf *agbp;
	int error;
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;

	free = container_of(item, struct xfs_extent_free_item, xefi_list);
	ASSERT(free->xefi_blockcount == 1);
	agno = XFS_FSB_TO_AGNO(mp, free->xefi_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, free->xefi_startblock);

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (!error)
		error = xfs_free_agfl_block(tp, agno, agbno, agbp,
				&free->xefi_oinfo);
	kmem_free(free);
	return error;
}

/* sub-type with special handling for AGFL deferred frees */
const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
	.diff_items = xfs_extent_free_diff_items,
	.create_intent = xfs_extent_free_create_intent,
	.abort_intent = xfs_extent_free_abort_intent,
	.log_item = xfs_extent_free_log_item,
	.create_done = xfs_extent_free_create_done,
	.finish_item = xfs_agfl_free_finish_item,
	.cancel_item = xfs_extent_free_cancel_item,
};
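
/*
 * Illustrative sketch, hypothetical helper not in the upstream file: a single
 * AGFL block is queued under the AGFL-specific defer type so that
 * xfs_agfl_free_finish_item() handles it, while ordinary extent frees are
 * queued as XFS_DEFER_OPS_TYPE_FREE.  Assumes this tree's enum
 * xfs_defer_ops_type values and the xfs_defer_add(tp, type, list) signature.
 */
STATIC void
example_defer_agfl_block_free(
	struct xfs_trans *tp,
	struct xfs_extent_free_item *free)
{
	/* AGFL frees are always exactly one block. */
	ASSERT(free->xefi_blockcount == 1);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &free->xefi_list);
}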

/* Reverse Mapping */

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void *priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_mount *mp = priv;
	struct xfs_rmap_intent *ra;
	struct xfs_rmap_intent *rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans *tp,
	unsigned int count)
{
	return NULL;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans *tp,
	void *intent,
	struct list_head *item)
{
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans *tp,
	void *intent,
	unsigned int count)
{
	return NULL;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans *tp,
	struct list_head *item,
	void *done_item,
	void **state)
{
	struct xfs_rmap_intent *rmap;
	int error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_rmap_finish_one(tp,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans *tp,
	void *state,
	int error)
{
	struct xfs_btree_cur *rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void *intent)
{
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head *item)
{
	struct xfs_rmap_intent *rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.diff_items = xfs_rmap_update_diff_items,
	.create_intent = xfs_rmap_update_create_intent,
	.abort_intent = xfs_rmap_update_abort_intent,
	.log_item = xfs_rmap_update_log_item,
	.create_done = xfs_rmap_update_create_done,
	.finish_item = xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_update_finish_cleanup,
	.cancel_item = xfs_rmap_update_cancel_item,
};
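
/*
 * Illustrative sketch, hypothetical helper rather than the real
 * xfs_defer_finish(): the deferred-ops framework passes the same opaque
 * *state to every ->finish_item() call in a batch and then calls
 * ->finish_cleanup() once, which lets xfs_rmap_finish_one() cache an rmapbt
 * cursor across consecutive intents that hit the same AG.  Error handling
 * and -EAGAIN requeueing are omitted for brevity.
 */
STATIC int
example_finish_rmap_batch(
	struct xfs_trans *tp,
	struct list_head *items)
{
	const struct xfs_defer_op_type *ops = &xfs_rmap_update_defer_type;
	void *state = NULL;
	int error = 0;

	while (!list_empty(items)) {
		struct list_head *li = items->next;

		list_del(li);
		/* ->finish_item() frees the intent once it has processed it. */
		error = ops->finish_item(tp, li, NULL, &state);
		if (error)
			break;
	}
	/* Release the cached btree cursor, if any. */
	ops->finish_cleanup(tp, state, error);
	return error;
}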

/* Reference Counting */

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void *priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_mount *mp = priv;
	struct xfs_refcount_intent *ra;
	struct xfs_refcount_intent *rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Get a CUI. */
STATIC void *
xfs_refcount_update_create_intent(
	struct xfs_trans *tp,
	unsigned int count)
{
	return NULL;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans *tp,
	void *intent,
	struct list_head *item)
{
}

/* Get a CUD so we can process all the deferred refcount updates. */
STATIC void *
xfs_refcount_update_create_done(
	struct xfs_trans *tp,
	void *intent,
	unsigned int count)
{
	return NULL;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans *tp,
	struct list_head *item,
	void *done_item,
	void **state)
{
	struct xfs_refcount_intent *refc;
	xfs_fsblock_t new_fsb;
	xfs_extlen_t new_aglen;
	int error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_refcount_finish_one(tp,
			refc->ri_type,
			refc->ri_startblock,
			refc->ri_blockcount,
			&new_fsb, &new_aglen,
			(struct xfs_btree_cur **)state);
	/* Did we run out of reservation? Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Clean up after processing deferred refcounts. */
STATIC void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans *tp,
	void *state,
	int error)
{
	struct xfs_btree_cur *rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	void *intent)
{
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head *item)
{
	struct xfs_refcount_intent *refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.diff_items = xfs_refcount_update_diff_items,
	.create_intent = xfs_refcount_update_create_intent,
	.abort_intent = xfs_refcount_update_abort_intent,
	.log_item = xfs_refcount_update_log_item,
	.create_done = xfs_refcount_update_create_done,
	.finish_item = xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_update_finish_cleanup,
	.cancel_item = xfs_refcount_update_cancel_item,
};
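
/*
 * Illustrative sketch, hypothetical helper not in the upstream file, of the
 * partial-progress contract above: when ->finish_item() returns -EAGAIN the
 * intent has been updated in place to cover only the unfinished range and
 * must be retried rather than freed.  The real framework rolls the
 * transaction between attempts to get a fresh reservation; that step is
 * omitted here.
 */
STATIC int
example_retry_refcount_item(
	struct xfs_trans *tp,
	struct list_head *item)
{
	void *state = NULL;
	int error;

	do {
		error = xfs_refcount_update_defer_type.finish_item(tp, item,
				NULL, &state);
	} while (error == -EAGAIN);

	xfs_refcount_update_defer_type.finish_cleanup(tp, state, error);
	return error;
}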

/* Inode Block Mapping */

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void *priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_bmap_intent *ba;
	struct xfs_bmap_intent *bb;

	ba = container_of(a, struct xfs_bmap_intent, bi_list);
	bb = container_of(b, struct xfs_bmap_intent, bi_list);
	return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}

/* Get a BUI. */
STATIC void *
xfs_bmap_update_create_intent(
	struct xfs_trans *tp,
	unsigned int count)
{
	return NULL;
}

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans *tp,
	void *intent,
	struct list_head *item)
{
}

/* Get a BUD so we can process all the deferred bmap updates. */
STATIC void *
xfs_bmap_update_create_done(
	struct xfs_trans *tp,
	void *intent,
	unsigned int count)
{
	return NULL;
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans *tp,
	struct list_head *item,
	void *done_item,
	void **state)
{
	struct xfs_bmap_intent *bmap;
	xfs_filblks_t count;
	int error;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	count = bmap->bi_bmap.br_blockcount;
	error = xfs_bmap_finish_one(tp,
			bmap->bi_owner,
			bmap->bi_type, bmap->bi_whichfork,
			bmap->bi_bmap.br_startoff,
			bmap->bi_bmap.br_startblock,
			&count,
			bmap->bi_bmap.br_state);
	if (!error && count > 0) {
		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
		bmap->bi_bmap.br_blockcount = count;
		return -EAGAIN;
	}
	kmem_free(bmap);
	return error;
}

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	void *intent)
{
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head *item)
{
	struct xfs_bmap_intent *bmap;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	kmem_free(bmap);
}

const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.diff_items = xfs_bmap_update_diff_items,
	.create_intent = xfs_bmap_update_create_intent,
	.abort_intent = xfs_bmap_update_abort_intent,
	.log_item = xfs_bmap_update_log_item,
	.create_done = xfs_bmap_update_create_done,
	.finish_item = xfs_bmap_update_finish_item,
	.cancel_item = xfs_bmap_update_cancel_item,
};
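
/*
 * Sketch only: the array name below is hypothetical and the exact
 * registration mechanism differs between releases, but this shows one
 * plausible way xfs_defer.c maps a queued work type back to the op tables
 * defined in this file, indexed by enum xfs_defer_ops_type.
 */
static const struct xfs_defer_op_type *example_defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
};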