/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
20 #include "libxfs_priv.h"
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_log_format.h"
25 #include "xfs_trans_resv.h"
28 #include "xfs_mount.h"
29 #include "xfs_defer.h"
30 #include "xfs_trans.h"
32 #include "xfs_alloc.h"
34 #include "xfs_refcount.h"
36 #include "xfs_inode.h"
/* Dummy defer item ops, since we don't do logging. */
42 /* Sort bmap items by AG. */
44 xfs_extent_free_diff_items(
49 struct xfs_mount
*mp
= priv
;
50 struct xfs_extent_free_item
*ra
;
51 struct xfs_extent_free_item
*rb
;
53 ra
= container_of(a
, struct xfs_extent_free_item
, xefi_list
);
54 rb
= container_of(b
, struct xfs_extent_free_item
, xefi_list
);
55 return XFS_FSB_TO_AGNO(mp
, ra
->xefi_startblock
) -
56 XFS_FSB_TO_AGNO(mp
, rb
->xefi_startblock
);
61 xfs_extent_free_create_intent(
/*
 * Log a free extent to the intent item.  Dummy op: no logging in
 * userspace, so nothing to do.
 * NOTE(review): middle parameter reconstructed from the defer op
 * interface — confirm against xfs_defer.h.
 */
static void
xfs_extent_free_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
77 /* Get an EFD so we can process all the free extents. */
79 xfs_extent_free_create_done(
87 /* Process a free extent. */
89 xfs_extent_free_finish_item(
91 struct xfs_defer_ops
*dop
,
92 struct list_head
*item
,
96 struct xfs_extent_free_item
*free
;
99 free
= container_of(item
, struct xfs_extent_free_item
, xefi_list
);
100 error
= xfs_free_extent(tp
, free
->xefi_startblock
,
101 free
->xefi_blockcount
, &free
->xefi_oinfo
,
/*
 * Abort all pending EFIs.  Dummy op: nothing was logged, so nothing
 * to abort.
 */
static void
xfs_extent_free_abort_intent(
	void				*intent)
{
}
114 /* Cancel a free extent. */
116 xfs_extent_free_cancel_item(
117 struct list_head
*item
)
119 struct xfs_extent_free_item
*free
;
121 free
= container_of(item
, struct xfs_extent_free_item
, xefi_list
);
125 static const struct xfs_defer_op_type xfs_extent_free_defer_type
= {
126 .type
= XFS_DEFER_OPS_TYPE_FREE
,
127 .diff_items
= xfs_extent_free_diff_items
,
128 .create_intent
= xfs_extent_free_create_intent
,
129 .abort_intent
= xfs_extent_free_abort_intent
,
130 .log_item
= xfs_extent_free_log_item
,
131 .create_done
= xfs_extent_free_create_done
,
132 .finish_item
= xfs_extent_free_finish_item
,
133 .cancel_item
= xfs_extent_free_cancel_item
,
136 /* Register the deferred op type. */
138 xfs_extent_free_init_defer_op(void)
140 xfs_defer_init_op_type(&xfs_extent_free_defer_type
);
/* Reverse Mapping */
145 /* Sort rmap intents by AG. */
147 xfs_rmap_update_diff_items(
152 struct xfs_mount
*mp
= priv
;
153 struct xfs_rmap_intent
*ra
;
154 struct xfs_rmap_intent
*rb
;
156 ra
= container_of(a
, struct xfs_rmap_intent
, ri_list
);
157 rb
= container_of(b
, struct xfs_rmap_intent
, ri_list
);
158 return XFS_FSB_TO_AGNO(mp
, ra
->ri_bmap
.br_startblock
) -
159 XFS_FSB_TO_AGNO(mp
, rb
->ri_bmap
.br_startblock
);
164 xfs_rmap_update_create_intent(
165 struct xfs_trans
*tp
,
/*
 * Log rmap updates in the intent item.  Dummy op: no logging in
 * userspace, so nothing to do.
 * NOTE(review): middle parameter reconstructed from the defer op
 * interface — confirm against xfs_defer.h.
 */
static void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
180 /* Get an RUD so we can process all the deferred rmap updates. */
182 xfs_rmap_update_create_done(
183 struct xfs_trans
*tp
,
190 /* Process a deferred rmap update. */
192 xfs_rmap_update_finish_item(
193 struct xfs_trans
*tp
,
194 struct xfs_defer_ops
*dop
,
195 struct list_head
*item
,
199 struct xfs_rmap_intent
*rmap
;
202 rmap
= container_of(item
, struct xfs_rmap_intent
, ri_list
);
203 error
= xfs_rmap_finish_one(tp
,
205 rmap
->ri_owner
, rmap
->ri_whichfork
,
206 rmap
->ri_bmap
.br_startoff
,
207 rmap
->ri_bmap
.br_startblock
,
208 rmap
->ri_bmap
.br_blockcount
,
209 rmap
->ri_bmap
.br_state
,
210 (struct xfs_btree_cur
**)state
);
/*
 * Clean up after processing deferred rmaps: release the cached rmap
 * btree cursor held in state.
 */
static void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}
/*
 * Abort all pending RUIs.  Dummy op: nothing was logged, so nothing
 * to abort.
 */
static void
xfs_rmap_update_abort_intent(
	void				*intent)
{
}
234 /* Cancel a deferred rmap update. */
236 xfs_rmap_update_cancel_item(
237 struct list_head
*item
)
239 struct xfs_rmap_intent
*rmap
;
241 rmap
= container_of(item
, struct xfs_rmap_intent
, ri_list
);
245 static const struct xfs_defer_op_type xfs_rmap_update_defer_type
= {
246 .type
= XFS_DEFER_OPS_TYPE_RMAP
,
247 .diff_items
= xfs_rmap_update_diff_items
,
248 .create_intent
= xfs_rmap_update_create_intent
,
249 .abort_intent
= xfs_rmap_update_abort_intent
,
250 .log_item
= xfs_rmap_update_log_item
,
251 .create_done
= xfs_rmap_update_create_done
,
252 .finish_item
= xfs_rmap_update_finish_item
,
253 .finish_cleanup
= xfs_rmap_update_finish_cleanup
,
254 .cancel_item
= xfs_rmap_update_cancel_item
,
257 /* Register the deferred op type. */
259 xfs_rmap_update_init_defer_op(void)
261 xfs_defer_init_op_type(&xfs_rmap_update_defer_type
);
/* Reference Counting */
266 /* Sort refcount intents by AG. */
268 xfs_refcount_update_diff_items(
273 struct xfs_mount
*mp
= priv
;
274 struct xfs_refcount_intent
*ra
;
275 struct xfs_refcount_intent
*rb
;
277 ra
= container_of(a
, struct xfs_refcount_intent
, ri_list
);
278 rb
= container_of(b
, struct xfs_refcount_intent
, ri_list
);
279 return XFS_FSB_TO_AGNO(mp
, ra
->ri_startblock
) -
280 XFS_FSB_TO_AGNO(mp
, rb
->ri_startblock
);
285 xfs_refcount_update_create_intent(
286 struct xfs_trans
*tp
,
/*
 * Log refcount updates in the intent item.  Dummy op: no logging in
 * userspace, so nothing to do.
 * NOTE(review): middle parameter reconstructed from the defer op
 * interface — confirm against xfs_defer.h.
 */
static void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
301 /* Get an CUD so we can process all the deferred refcount updates. */
303 xfs_refcount_update_create_done(
304 struct xfs_trans
*tp
,
311 /* Process a deferred refcount update. */
313 xfs_refcount_update_finish_item(
314 struct xfs_trans
*tp
,
315 struct xfs_defer_ops
*dop
,
316 struct list_head
*item
,
320 struct xfs_refcount_intent
*refc
;
321 xfs_fsblock_t new_fsb
;
322 xfs_extlen_t new_aglen
;
325 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
326 error
= xfs_refcount_finish_one(tp
, dop
,
330 &new_fsb
, &new_aglen
,
331 (struct xfs_btree_cur
**)state
);
332 /* Did we run out of reservation? Requeue what we didn't finish. */
333 if (!error
&& new_aglen
> 0) {
334 ASSERT(refc
->ri_type
== XFS_REFCOUNT_INCREASE
||
335 refc
->ri_type
== XFS_REFCOUNT_DECREASE
);
336 refc
->ri_startblock
= new_fsb
;
337 refc
->ri_blockcount
= new_aglen
;
/*
 * Clean up after processing deferred refcounts: release the cached
 * refcount btree cursor held in state.
 */
static void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}
/*
 * Abort all pending CUIs.  Dummy op: nothing was logged, so nothing
 * to abort.
 */
static void
xfs_refcount_update_abort_intent(
	void				*intent)
{
}
363 /* Cancel a deferred refcount update. */
365 xfs_refcount_update_cancel_item(
366 struct list_head
*item
)
368 struct xfs_refcount_intent
*refc
;
370 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
374 static const struct xfs_defer_op_type xfs_refcount_update_defer_type
= {
375 .type
= XFS_DEFER_OPS_TYPE_REFCOUNT
,
376 .diff_items
= xfs_refcount_update_diff_items
,
377 .create_intent
= xfs_refcount_update_create_intent
,
378 .abort_intent
= xfs_refcount_update_abort_intent
,
379 .log_item
= xfs_refcount_update_log_item
,
380 .create_done
= xfs_refcount_update_create_done
,
381 .finish_item
= xfs_refcount_update_finish_item
,
382 .finish_cleanup
= xfs_refcount_update_finish_cleanup
,
383 .cancel_item
= xfs_refcount_update_cancel_item
,
386 /* Register the deferred op type. */
388 xfs_refcount_update_init_defer_op(void)
390 xfs_defer_init_op_type(&xfs_refcount_update_defer_type
);
/* Inode Block Mapping */
395 /* Sort bmap intents by inode. */
397 xfs_bmap_update_diff_items(
402 struct xfs_bmap_intent
*ba
;
403 struct xfs_bmap_intent
*bb
;
405 ba
= container_of(a
, struct xfs_bmap_intent
, bi_list
);
406 bb
= container_of(b
, struct xfs_bmap_intent
, bi_list
);
407 return ba
->bi_owner
->i_ino
- bb
->bi_owner
->i_ino
;
412 xfs_bmap_update_create_intent(
413 struct xfs_trans
*tp
,
/*
 * Log bmap updates in the intent item.  Dummy op: no logging in
 * userspace, so nothing to do.
 * NOTE(review): middle parameter reconstructed from the defer op
 * interface — confirm against xfs_defer.h.
 */
static void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}
428 /* Get an BUD so we can process all the deferred rmap updates. */
430 xfs_bmap_update_create_done(
431 struct xfs_trans
*tp
,
438 /* Process a deferred rmap update. */
440 xfs_bmap_update_finish_item(
441 struct xfs_trans
*tp
,
442 struct xfs_defer_ops
*dop
,
443 struct list_head
*item
,
447 struct xfs_bmap_intent
*bmap
;
451 bmap
= container_of(item
, struct xfs_bmap_intent
, bi_list
);
452 count
= bmap
->bi_bmap
.br_blockcount
;
453 error
= xfs_bmap_finish_one(tp
, dop
,
455 bmap
->bi_type
, bmap
->bi_whichfork
,
456 bmap
->bi_bmap
.br_startoff
,
457 bmap
->bi_bmap
.br_startblock
,
459 bmap
->bi_bmap
.br_state
);
460 if (!error
&& count
> 0) {
461 ASSERT(bmap
->bi_type
== XFS_BMAP_UNMAP
);
462 bmap
->bi_bmap
.br_blockcount
= count
;
/*
 * Abort all pending BUIs.  Dummy op: nothing was logged, so nothing
 * to abort.
 */
static void
xfs_bmap_update_abort_intent(
	void				*intent)
{
}
476 /* Cancel a deferred rmap update. */
478 xfs_bmap_update_cancel_item(
479 struct list_head
*item
)
481 struct xfs_bmap_intent
*bmap
;
483 bmap
= container_of(item
, struct xfs_bmap_intent
, bi_list
);
487 static const struct xfs_defer_op_type xfs_bmap_update_defer_type
= {
488 .type
= XFS_DEFER_OPS_TYPE_BMAP
,
489 .diff_items
= xfs_bmap_update_diff_items
,
490 .create_intent
= xfs_bmap_update_create_intent
,
491 .abort_intent
= xfs_bmap_update_abort_intent
,
492 .log_item
= xfs_bmap_update_log_item
,
493 .create_done
= xfs_bmap_update_create_done
,
494 .finish_item
= xfs_bmap_update_finish_item
,
495 .cancel_item
= xfs_bmap_update_cancel_item
,
498 /* Register the deferred op type. */
500 xfs_bmap_update_init_defer_op(void)
502 xfs_defer_init_op_type(&xfs_bmap_update_defer_type
);