libxfs/defer_item.c
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_inode.h"

/* Dummy defer item ops, since we don't do logging. */
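/*
 * Userspace has no log, so the create_intent/create_done callbacks in
 * each op type below simply return NULL and log_item/abort_intent do
 * nothing; all of the real work happens immediately in finish_item.
 */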

/* Extent Freeing */

/* Sort extent free items by AG. */
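/*
 * Processing deferred frees in ascending AG order matches the AG
 * locking order used elsewhere in XFS, which keeps multi-AG work
 * deadlock free.
 */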
static int
xfs_extent_free_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_extent_free_item	*ra;
	struct xfs_extent_free_item	*rb;

	ra = container_of(a, struct xfs_extent_free_item, xefi_list);
	rb = container_of(b, struct xfs_extent_free_item, xefi_list);
	return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
}

/* Get an EFI. */
STATIC void *
xfs_extent_free_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	return NULL;
}

/* Log a free extent to the intent item. */
STATIC void
xfs_extent_free_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}

/* Get an EFD so we can process all the free extents. */
STATIC void *
xfs_extent_free_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return NULL;
}

/* Process a free extent. */
STATIC int
xfs_extent_free_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_extent_free_item	*free;
	int				error;

	free = container_of(item, struct xfs_extent_free_item, xefi_list);
	error = xfs_free_extent(tp, free->xefi_startblock,
			free->xefi_blockcount, &free->xefi_oinfo,
			XFS_AG_RESV_NONE);
	kmem_free(free);
	return error;
}

/* Abort all pending EFIs. */
STATIC void
xfs_extent_free_abort_intent(
	void				*intent)
{
}

/* Cancel a free extent. */
STATIC void
xfs_extent_free_cancel_item(
	struct list_head		*item)
{
	struct xfs_extent_free_item	*free;

	free = container_of(item, struct xfs_extent_free_item, xefi_list);
	kmem_free(free);
}

static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_FREE,
	.diff_items	= xfs_extent_free_diff_items,
	.create_intent	= xfs_extent_free_create_intent,
	.abort_intent	= xfs_extent_free_abort_intent,
	.log_item	= xfs_extent_free_log_item,
	.create_done	= xfs_extent_free_create_done,
	.finish_item	= xfs_extent_free_finish_item,
	.cancel_item	= xfs_extent_free_cancel_item,
};

/* Register the deferred op type. */
void
xfs_extent_free_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_extent_free_defer_type);
}

/* Reverse Mapping */

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	return NULL;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return NULL;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_rmap_finish_one(tp,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
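/*
 * Note that the rmap btree cursor is cached in *state across
 * consecutive finish_item calls and is only released here, once the
 * whole intent list has been processed or an error has occurred.
 */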
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_RMAP,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/* Register the deferred op type. */
void
xfs_rmap_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
}

/* Reference Counting */

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Get a CUI. */
STATIC void *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	return NULL;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}

/* Get a CUD so we can process all the deferred refcount updates. */
STATIC void *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return NULL;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_refcount_finish_one(tp, dop,
			refc->ri_type,
			refc->ri_startblock,
			refc->ri_blockcount,
			&new_fsb, &new_aglen,
			(struct xfs_btree_cur **)state);
	/* Did we run out of reservation? Requeue what we didn't finish. */
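	/*
	 * Returning -EAGAIN leaves this intent queued so that the
	 * remaining range is retried after the transaction rolls.
	 */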
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Clean up after processing deferred refcounts. */
STATIC void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans		*tp,
	void				*state,
	int				error)
{
	struct xfs_btree_cur		*rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	void				*intent)
{
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_REFCOUNT,
	.diff_items	= xfs_refcount_update_diff_items,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.log_item	= xfs_refcount_update_log_item,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup	= xfs_refcount_update_finish_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Register the deferred op type. */
void
xfs_refcount_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
}

/* Inode Block Mapping */

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_bmap_intent		*ba;
	struct xfs_bmap_intent		*bb;

	ba = container_of(a, struct xfs_bmap_intent, bi_list);
	bb = container_of(b, struct xfs_bmap_intent, bi_list);
	return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}

/* Get a BUI. */
STATIC void *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	return NULL;
}

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
}

/* Get a BUD so we can process all the deferred bmap updates. */
STATIC void *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return NULL;
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_bmap_intent		*bmap;
	xfs_filblks_t			count;
	int				error;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	count = bmap->bi_bmap.br_blockcount;
	error = xfs_bmap_finish_one(tp, dop,
			bmap->bi_owner,
			bmap->bi_type, bmap->bi_whichfork,
			bmap->bi_bmap.br_startoff,
			bmap->bi_bmap.br_startblock,
			&count,
			bmap->bi_bmap.br_state);
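	/*
	 * A nonzero residual count means the unmap did not finish within
	 * this transaction's reservation; return -EAGAIN to requeue the
	 * remainder for the next roll.
	 */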
	if (!error && count > 0) {
		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
		bmap->bi_bmap.br_blockcount = count;
		return -EAGAIN;
	}
	kmem_free(bmap);
	return error;
}

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	void				*intent)
{
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_bmap_intent		*bmap;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	kmem_free(bmap);
}

static const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_BMAP,
	.diff_items	= xfs_bmap_update_diff_items,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.log_item	= xfs_bmap_update_log_item,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
};

/* Register the deferred op type. */
void
xfs_bmap_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_bmap_update_defer_type);
}