// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
6 #include "libxfs_priv.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_trans.h"
18 #include "xfs_alloc.h"
20 #include "xfs_refcount.h"
22 #include "xfs_inode.h"
24 /* Dummy defer item ops, since we don't do logging. */
28 /* Sort bmap items by AG. */
30 xfs_extent_free_diff_items(
35 struct xfs_mount
*mp
= priv
;
36 struct xfs_extent_free_item
*ra
;
37 struct xfs_extent_free_item
*rb
;
39 ra
= container_of(a
, struct xfs_extent_free_item
, xefi_list
);
40 rb
= container_of(b
, struct xfs_extent_free_item
, xefi_list
);
41 return XFS_FSB_TO_AGNO(mp
, ra
->xefi_startblock
) -
42 XFS_FSB_TO_AGNO(mp
, rb
->xefi_startblock
);
47 xfs_extent_free_create_intent(
/* Log a free extent to the intent item.  Dummy: no logging in userspace. */
static void
xfs_extent_free_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
63 /* Get an EFD so we can process all the free extents. */
65 xfs_extent_free_create_done(
73 /* Process a free extent. */
75 xfs_extent_free_finish_item(
77 struct list_head
*item
,
81 struct xfs_extent_free_item
*free
;
84 free
= container_of(item
, struct xfs_extent_free_item
, xefi_list
);
85 error
= xfs_free_extent(tp
, free
->xefi_startblock
,
86 free
->xefi_blockcount
, &free
->xefi_oinfo
,
/* Abort all pending EFIs.  Dummy: nothing was logged, nothing to abort. */
static void
xfs_extent_free_abort_intent(
	void				*intent)
{
}
99 /* Cancel a free extent. */
101 xfs_extent_free_cancel_item(
102 struct list_head
*item
)
104 struct xfs_extent_free_item
*free
;
106 free
= container_of(item
, struct xfs_extent_free_item
, xefi_list
);
110 const struct xfs_defer_op_type xfs_extent_free_defer_type
= {
111 .diff_items
= xfs_extent_free_diff_items
,
112 .create_intent
= xfs_extent_free_create_intent
,
113 .abort_intent
= xfs_extent_free_abort_intent
,
114 .log_item
= xfs_extent_free_log_item
,
115 .create_done
= xfs_extent_free_create_done
,
116 .finish_item
= xfs_extent_free_finish_item
,
117 .cancel_item
= xfs_extent_free_cancel_item
,
121 * AGFL blocks are accounted differently in the reserve pools and are not
122 * inserted into the busy extent list.
125 xfs_agfl_free_finish_item(
126 struct xfs_trans
*tp
,
127 struct list_head
*item
,
131 struct xfs_mount
*mp
= tp
->t_mountp
;
132 struct xfs_extent_free_item
*free
;
133 struct xfs_buf
*agbp
;
138 free
= container_of(item
, struct xfs_extent_free_item
, xefi_list
);
139 ASSERT(free
->xefi_blockcount
== 1);
140 agno
= XFS_FSB_TO_AGNO(mp
, free
->xefi_startblock
);
141 agbno
= XFS_FSB_TO_AGBNO(mp
, free
->xefi_startblock
);
143 error
= xfs_alloc_read_agf(mp
, tp
, agno
, 0, &agbp
);
145 error
= xfs_free_agfl_block(tp
, agno
, agbno
, agbp
,
151 /* sub-type with special handling for AGFL deferred frees */
152 const struct xfs_defer_op_type xfs_agfl_free_defer_type
= {
153 .diff_items
= xfs_extent_free_diff_items
,
154 .create_intent
= xfs_extent_free_create_intent
,
155 .abort_intent
= xfs_extent_free_abort_intent
,
156 .log_item
= xfs_extent_free_log_item
,
157 .create_done
= xfs_extent_free_create_done
,
158 .finish_item
= xfs_agfl_free_finish_item
,
159 .cancel_item
= xfs_extent_free_cancel_item
,
162 /* Reverse Mapping */
164 /* Sort rmap intents by AG. */
166 xfs_rmap_update_diff_items(
171 struct xfs_mount
*mp
= priv
;
172 struct xfs_rmap_intent
*ra
;
173 struct xfs_rmap_intent
*rb
;
175 ra
= container_of(a
, struct xfs_rmap_intent
, ri_list
);
176 rb
= container_of(b
, struct xfs_rmap_intent
, ri_list
);
177 return XFS_FSB_TO_AGNO(mp
, ra
->ri_bmap
.br_startblock
) -
178 XFS_FSB_TO_AGNO(mp
, rb
->ri_bmap
.br_startblock
);
183 xfs_rmap_update_create_intent(
184 struct xfs_trans
*tp
,
/* Log rmap updates in the intent item.  Dummy: no logging in userspace. */
static void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
199 /* Get an RUD so we can process all the deferred rmap updates. */
201 xfs_rmap_update_create_done(
202 struct xfs_trans
*tp
,
209 /* Process a deferred rmap update. */
211 xfs_rmap_update_finish_item(
212 struct xfs_trans
*tp
,
213 struct list_head
*item
,
217 struct xfs_rmap_intent
*rmap
;
220 rmap
= container_of(item
, struct xfs_rmap_intent
, ri_list
);
221 error
= xfs_rmap_finish_one(tp
,
223 rmap
->ri_owner
, rmap
->ri_whichfork
,
224 rmap
->ri_bmap
.br_startoff
,
225 rmap
->ri_bmap
.br_startblock
,
226 rmap
->ri_bmap
.br_blockcount
,
227 rmap
->ri_bmap
.br_state
,
228 (struct xfs_btree_cur
**)state
);
/* Clean up after processing deferred rmaps: close the cached cursor. */
static void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}
/* Abort all pending RUIs.  Dummy: nothing was logged, nothing to abort. */
static void
xfs_rmap_update_abort_intent(
	void				*intent)
{
}
252 /* Cancel a deferred rmap update. */
254 xfs_rmap_update_cancel_item(
255 struct list_head
*item
)
257 struct xfs_rmap_intent
*rmap
;
259 rmap
= container_of(item
, struct xfs_rmap_intent
, ri_list
);
263 const struct xfs_defer_op_type xfs_rmap_update_defer_type
= {
264 .diff_items
= xfs_rmap_update_diff_items
,
265 .create_intent
= xfs_rmap_update_create_intent
,
266 .abort_intent
= xfs_rmap_update_abort_intent
,
267 .log_item
= xfs_rmap_update_log_item
,
268 .create_done
= xfs_rmap_update_create_done
,
269 .finish_item
= xfs_rmap_update_finish_item
,
270 .finish_cleanup
= xfs_rmap_update_finish_cleanup
,
271 .cancel_item
= xfs_rmap_update_cancel_item
,
274 /* Reference Counting */
276 /* Sort refcount intents by AG. */
278 xfs_refcount_update_diff_items(
283 struct xfs_mount
*mp
= priv
;
284 struct xfs_refcount_intent
*ra
;
285 struct xfs_refcount_intent
*rb
;
287 ra
= container_of(a
, struct xfs_refcount_intent
, ri_list
);
288 rb
= container_of(b
, struct xfs_refcount_intent
, ri_list
);
289 return XFS_FSB_TO_AGNO(mp
, ra
->ri_startblock
) -
290 XFS_FSB_TO_AGNO(mp
, rb
->ri_startblock
);
295 xfs_refcount_update_create_intent(
296 struct xfs_trans
*tp
,
/* Log refcount updates in the intent item.  Dummy: no logging in userspace. */
static void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
311 /* Get an CUD so we can process all the deferred refcount updates. */
313 xfs_refcount_update_create_done(
314 struct xfs_trans
*tp
,
321 /* Process a deferred refcount update. */
323 xfs_refcount_update_finish_item(
324 struct xfs_trans
*tp
,
325 struct list_head
*item
,
329 struct xfs_refcount_intent
*refc
;
330 xfs_fsblock_t new_fsb
;
331 xfs_extlen_t new_aglen
;
334 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
335 error
= xfs_refcount_finish_one(tp
,
339 &new_fsb
, &new_aglen
,
340 (struct xfs_btree_cur
**)state
);
341 /* Did we run out of reservation? Requeue what we didn't finish. */
342 if (!error
&& new_aglen
> 0) {
343 ASSERT(refc
->ri_type
== XFS_REFCOUNT_INCREASE
||
344 refc
->ri_type
== XFS_REFCOUNT_DECREASE
);
345 refc
->ri_startblock
= new_fsb
;
346 refc
->ri_blockcount
= new_aglen
;
/* Clean up after processing deferred refcounts: close the cached cursor. */
static void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}
/* Abort all pending CUIs.  Dummy: nothing was logged, nothing to abort. */
static void
xfs_refcount_update_abort_intent(
	void				*intent)
{
}
372 /* Cancel a deferred refcount update. */
374 xfs_refcount_update_cancel_item(
375 struct list_head
*item
)
377 struct xfs_refcount_intent
*refc
;
379 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
383 const struct xfs_defer_op_type xfs_refcount_update_defer_type
= {
384 .diff_items
= xfs_refcount_update_diff_items
,
385 .create_intent
= xfs_refcount_update_create_intent
,
386 .abort_intent
= xfs_refcount_update_abort_intent
,
387 .log_item
= xfs_refcount_update_log_item
,
388 .create_done
= xfs_refcount_update_create_done
,
389 .finish_item
= xfs_refcount_update_finish_item
,
390 .finish_cleanup
= xfs_refcount_update_finish_cleanup
,
391 .cancel_item
= xfs_refcount_update_cancel_item
,
394 /* Inode Block Mapping */
396 /* Sort bmap intents by inode. */
398 xfs_bmap_update_diff_items(
403 struct xfs_bmap_intent
*ba
;
404 struct xfs_bmap_intent
*bb
;
406 ba
= container_of(a
, struct xfs_bmap_intent
, bi_list
);
407 bb
= container_of(b
, struct xfs_bmap_intent
, bi_list
);
408 return ba
->bi_owner
->i_ino
- bb
->bi_owner
->i_ino
;
413 xfs_bmap_update_create_intent(
414 struct xfs_trans
*tp
,
/* Log bmap updates in the intent item.  Dummy: no logging in userspace. */
static void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
429 /* Get an BUD so we can process all the deferred rmap updates. */
431 xfs_bmap_update_create_done(
432 struct xfs_trans
*tp
,
439 /* Process a deferred rmap update. */
441 xfs_bmap_update_finish_item(
442 struct xfs_trans
*tp
,
443 struct list_head
*item
,
447 struct xfs_bmap_intent
*bmap
;
451 bmap
= container_of(item
, struct xfs_bmap_intent
, bi_list
);
452 count
= bmap
->bi_bmap
.br_blockcount
;
453 error
= xfs_bmap_finish_one(tp
,
455 bmap
->bi_type
, bmap
->bi_whichfork
,
456 bmap
->bi_bmap
.br_startoff
,
457 bmap
->bi_bmap
.br_startblock
,
459 bmap
->bi_bmap
.br_state
);
460 if (!error
&& count
> 0) {
461 ASSERT(bmap
->bi_type
== XFS_BMAP_UNMAP
);
462 bmap
->bi_bmap
.br_blockcount
= count
;
/* Abort all pending BUIs.  Dummy: nothing was logged, nothing to abort. */
static void
xfs_bmap_update_abort_intent(
	void				*intent)
{
}
476 /* Cancel a deferred rmap update. */
478 xfs_bmap_update_cancel_item(
479 struct list_head
*item
)
481 struct xfs_bmap_intent
*bmap
;
483 bmap
= container_of(item
, struct xfs_bmap_intent
, bi_list
);
487 const struct xfs_defer_op_type xfs_bmap_update_defer_type
= {
488 .diff_items
= xfs_bmap_update_diff_items
,
489 .create_intent
= xfs_bmap_update_create_intent
,
490 .abort_intent
= xfs_bmap_update_abort_intent
,
491 .log_item
= xfs_bmap_update_log_item
,
492 .create_done
= xfs_bmap_update_create_done
,
493 .finish_item
= xfs_bmap_update_finish_item
,
494 .cancel_item
= xfs_bmap_update_cancel_item
,