// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <libxfs.h>
#include "err_protos.h"
#include "libfrog/bitmap.h"
#include "slab.h"
#include "rmap.h"
#include "incore.h"
#include "bulkload.h"
#include "agbtree.h"

/* Initialize a btree rebuild context. */
static void
init_rebuild(
	struct repair_ctx	*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_agblock_t		free_space,
	struct bt_rebuild	*btr)
{
	memset(btr, 0, sizeof(struct bt_rebuild));

	bulkload_init_ag(&btr->newbt, sc, oinfo);
	bulkload_estimate_ag_slack(sc, &btr->bload, free_space);
}

/*
 * Update this free space record to reflect the blocks we stole from the
 * beginning of the record.
 */
static void
consume_freespace(
	xfs_agnumber_t		agno,
	struct extent_tree_node	*ext_ptr,
	uint32_t		len)
{
	struct extent_tree_node	*bno_ext_ptr;
	xfs_agblock_t		new_start = ext_ptr->ex_startblock + len;
	xfs_extlen_t		new_len = ext_ptr->ex_blockcount - len;

	/* Delete the used-up extent from both extent trees. */
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "releasing extent: %u [%u %u]\n", agno,
			ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif
	bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
	ASSERT(bno_ext_ptr != NULL);
	get_bno_extent(agno, bno_ext_ptr);
	release_extent_tree_node(bno_ext_ptr);

	ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
			ext_ptr->ex_blockcount);
	release_extent_tree_node(ext_ptr);

	/*
	 * If we only used part of this extent, reinsert the remainder so
	 * that both extent trees stay properly sorted.
	 */
	if (new_len > 0) {
		add_bno_extent(agno, new_start, new_len);
		add_bcnt_extent(agno, new_start, new_len);
	}
}
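
/*
 * For illustration, suppose consume_freespace() was handed the extent
 * [startblock 100, blockcount 8] and stole len = 3 blocks from it
 * (hypothetical numbers).  Both tree nodes for [100, 8] are released,
 * and the remainder is reinserted as:
 *
 *	new_start = 100 + 3 = 103
 *	new_len   =   8 - 3 =   5
 *
 * which keeps the by-block tree sorted by start block and the by-size
 * tree sorted by extent length.
 */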

/*
 * Reserve blocks for the new per-AG structures.  Returns true if all blocks
 * were allocated, and false if we ran out of space.
 */
static bool
reserve_agblocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr,
	uint32_t		nr_blocks)
{
	struct extent_tree_node	*ext_ptr;
	uint32_t		blocks_allocated = 0;
	uint32_t		len;
	int			error;

	while (blocks_allocated < nr_blocks) {
		xfs_fsblock_t	fsbno;

		/*
		 * Grab the smallest extent and use it up, then get the
		 * next smallest.  This mimics the init_*_cursor code.
		 */
		ext_ptr = findfirst_bcnt_extent(agno);
		if (!ext_ptr)
			break;

		/* Use up the extent we've got. */
		len = min(ext_ptr->ex_blockcount, nr_blocks - blocks_allocated);
		fsbno = XFS_AGB_TO_FSB(mp, agno, ext_ptr->ex_startblock);
		error = bulkload_add_blocks(&btr->newbt, fsbno, len);
		if (error)
			do_error(_("could not set up btree reservation: %s\n"),
				strerror(-error));

		error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, len,
				btr->newbt.oinfo.oi_owner);
		if (error)
			do_error(_("could not set up btree rmaps: %s\n"),
				strerror(-error));

		consume_freespace(agno, ext_ptr, len);
		blocks_allocated += len;
	}
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "blocks_allocated = %u\n", blocks_allocated);
#endif
	return blocks_allocated == nr_blocks;
}
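
/*
 * For illustration, suppose nr_blocks = 5 and the by-size tree holds only
 * the extents [200, 2] and [300, 4] (hypothetical numbers).  The loop
 * above takes all of [200, 2], then 3 blocks of [300, 4]; the remainder
 * [303, 1] is reinserted by consume_freespace(), and the function returns
 * true with blocks_allocated == 5.  Consuming the smallest extents first
 * also leaves the larger extents intact for regular allocations.
 */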

static inline void
reserve_btblocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr,
	uint32_t		nr_blocks)
{
	if (!reserve_agblocks(mp, agno, btr, nr_blocks))
		do_error(
_("error - not enough free space in filesystem, AG %u\n"),
			agno);
}

/* Feed one of the new btree blocks to the bulk loader. */
static int
rebuild_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;

	return bulkload_claim_block(cur, &btr->newbt, ptr);
}

/*
 * Scoop up leftovers from a rebuild cursor for later freeing, then free the
 * rebuild context.
 */
void
finish_rebuild(
	struct xfs_mount	*mp,
	struct bt_rebuild	*btr,
	struct bitmap		*lost_blocks)
{
	struct bulkload_resv	*resv, *n;
	int			error;

	for_each_bulkload_reservation(&btr->newbt, resv, n) {
		if (resv->used == resv->len)
			continue;

		error = bitmap_set(lost_blocks, resv->fsbno + resv->used,
				resv->len - resv->used);
		if (error)
			do_error(
_("Insufficient memory saving lost blocks, err=%d.\n"), error);
		resv->used = resv->len;
	}

	bulkload_destroy(&btr->newbt, 0);
}
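
/*
 * For illustration, suppose a reservation was made at fsbno 500 for
 * len = 4 blocks but the bulk loader only consumed used = 1 of them
 * (hypothetical numbers).  The loop above then marks blocks 501-503 in
 * @lost_blocks so that the caller can free them once the new btrees
 * have been committed.
 */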

/*
 * Free Space Btrees
 *
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL.  This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened).  If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then the repair fails, and a filesystem laid out this way
 * would hit the same failure at runtime.  Hence leave a couple of
 * records' worth of slack space in each block to allow immediate
 * modification of the tree without requiring splits.
 */
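
/*
 * A minimal sketch of how a caller might drive the free space rebuild
 * using the entry points in this file; the variables sc, agno,
 * free_space, nr_extents, extra_blocks, and lost_blocks are hypothetical
 * stand-ins for whatever state the repair phase tracks:
 *
 *	struct bt_rebuild	btr_bno, btr_cnt;
 *
 *	init_freespace_cursors(sc, agno, free_space, &nr_extents,
 *			&extra_blocks, &btr_bno, &btr_cnt);
 *	build_freespace_btrees(sc, agno, &btr_bno, &btr_cnt);
 *	finish_rebuild(sc->mp, &btr_bno, lost_blocks);
 *	finish_rebuild(sc->mp, &btr_cnt, lost_blocks);
 */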

/*
 * Return the next free space extent tree record from the previous value we
 * saw.
 */
static inline struct extent_tree_node *
get_bno_rec(
	struct xfs_btree_cur	*cur,
	struct extent_tree_node	*prev_value)
{
	xfs_agnumber_t		agno = cur->bc_ag.agno;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		if (!prev_value)
			return findfirst_bno_extent(agno);
		return findnext_bno_extent(prev_value);
	}

	/* cnt btree */
	if (!prev_value)
		return findfirst_bcnt_extent(agno);
	return findnext_bcnt_extent(agno, prev_value);
}

/* Grab one bnobt record and put it in the btree cursor. */
static int
get_bnobt_record(
	struct xfs_btree_cur	*cur,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;
	struct xfs_alloc_rec_incore	*arec = &cur->bc_rec.a;

	btr->bno_rec = get_bno_rec(cur, btr->bno_rec);
	arec->ar_startblock = btr->bno_rec->ex_startblock;
	arec->ar_blockcount = btr->bno_rec->ex_blockcount;
	btr->freeblks += btr->bno_rec->ex_blockcount;
	return 0;
}

void
init_freespace_cursors(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	unsigned int		free_space,
	unsigned int		*nr_extents,
	int			*extra_blocks,
	struct bt_rebuild	*btr_bno,
	struct bt_rebuild	*btr_cnt)
{
	unsigned int		agfl_goal;
	int			error;

	agfl_goal = libxfs_alloc_min_freelist(sc->mp, NULL);

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_bno);
	init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_cnt);

	btr_bno->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_bno->newbt.afake, agno, XFS_BTNUM_BNO);
	btr_cnt->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_cnt->newbt.afake, agno, XFS_BTNUM_CNT);

	btr_bno->bload.get_record = get_bnobt_record;
	btr_bno->bload.claim_block = rebuild_claim_block;

	btr_cnt->bload.get_record = get_bnobt_record;
	btr_cnt->bload.claim_block = rebuild_claim_block;

	/*
	 * Now we need to allocate blocks for the free space btrees using the
	 * free space records we're about to put in them.  Every record we use
	 * can change the shape of the free space trees, so we recompute the
	 * btree shape until we stop needing /more/ blocks.  If we have any
	 * left over we'll stash them in the AGFL when we're done.
	 */
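	/*
	 * For illustration, suppose the first pass below computes a
	 * geometry needing 4 bnobt blocks and 4 cntbt blocks, with
	 * agfl_goal = 6 (hypothetical numbers).  Reserving those blocks
	 * consumes free space records, which can change *nr_extents and
	 * therefore the next geometry computation, so the loop repeats
	 * until both deficits are covered and delta_bno + delta_cnt
	 * meets agfl_goal.  If the AG is too low on space to fill the
	 * AGFL, agfl_goal drops to zero so the loop can still terminate
	 * with valid (if AGFL-starved) btrees.
	 */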
	do {
		unsigned int	num_freeblocks;
		int		delta_bno, delta_cnt;
		int		agfl_wanted;

		/* Compute how many bnobt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_bno->cur,
				&btr_bno->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by block btree geometry, error %d.\n"), error);

		/* Compute how many cntbt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_cnt->cur,
				&btr_cnt->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by length btree geometry, error %d.\n"), error);

		/*
		 * Compute the deficit between the number of blocks reserved
		 * and the number of blocks we think we need for the btree.
		 */
		delta_bno = (int)btr_bno->newbt.nr_reserved -
				btr_bno->bload.nr_blocks;
		delta_cnt = (int)btr_cnt->newbt.nr_reserved -
				btr_cnt->bload.nr_blocks;
		/*
		 * We don't need any more blocks and have met the AGFL goal,
		 * so we're done.
		 */
		if (delta_bno >= 0 && delta_cnt >= 0 &&
		    delta_bno + delta_cnt >= agfl_goal) {
			*extra_blocks = delta_bno + delta_cnt;
			break;
		}

		/* Allocate however many more blocks we need this time. */
		if (delta_bno < 0) {
			reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno);
			delta_bno = 0;
		}
		if (delta_cnt < 0) {
			reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt);
			delta_cnt = 0;
		}

		/*
		 * Try to fill the bnobt cursor with extra blocks to populate
		 * the AGFL.  If we don't get all the blocks we want, stop
		 * trying to fill the AGFL because the AG is totally out of
		 * space.
		 */
		agfl_wanted = agfl_goal - (delta_bno + delta_cnt);
		if (agfl_wanted > 0 &&
		    !reserve_agblocks(sc->mp, agno, btr_bno, agfl_wanted))
			agfl_goal = 0;

		/* Ok, now how many free space records do we have? */
		*nr_extents = count_bno_extents_blocks(agno, &num_freeblocks);
	} while (1);
}

/* Rebuild the free space btrees. */
void
build_freespace_btrees(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr_bno,
	struct bt_rebuild	*btr_cnt)
{
	int			error;

	/* Add all observed bnobt records. */
	error = -libxfs_btree_bload(btr_bno->cur, &btr_bno->bload, btr_bno);
	if (error)
		do_error(
_("Error %d while creating bnobt btree for AG %u.\n"), error, agno);

	/* Add all observed cntbt records. */
	error = -libxfs_btree_bload(btr_cnt->cur, &btr_cnt->bload, btr_cnt);
	if (error)
		do_error(
_("Error %d while creating cntbt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_bno->cur, 0);
	libxfs_btree_del_cursor(btr_cnt->cur, 0);
}

/* Inode Btrees */

static inline struct ino_tree_node *
get_ino_rec(
	struct xfs_btree_cur	*cur,
	struct ino_tree_node	*prev_value)
{
	xfs_agnumber_t		agno = cur->bc_ag.agno;

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		if (!prev_value)
			return findfirst_inode_rec(agno);
		return next_ino_rec(prev_value);
	}

	/* finobt */
	if (!prev_value)
		return findfirst_free_inode_rec(agno);
	return next_free_ino_rec(prev_value);
}

/* Grab one inobt record. */
static int
get_inobt_record(
	struct xfs_btree_cur	*cur,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;
	struct xfs_inobt_rec_incore	*irec = &cur->bc_rec.i;
	struct ino_tree_node	*ino_rec;
	int			inocnt = 0;
	int			finocnt = 0;
	int			k;

	btr->ino_rec = ino_rec = get_ino_rec(cur, btr->ino_rec);

	/* Transform the incore record into an on-disk record. */
	irec->ir_startino = ino_rec->ino_startnum;
	irec->ir_free = ino_rec->ir_free;

	for (k = 0; k < sizeof(xfs_inofree_t) * NBBY; k++) {
		ASSERT(is_inode_confirmed(ino_rec, k));

		if (is_inode_sparse(ino_rec, k))
			continue;
		if (is_inode_free(ino_rec, k))
			finocnt++;
		inocnt++;
	}

	irec->ir_count = inocnt;
	irec->ir_freecount = finocnt;

	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		uint64_t	sparse;
		int		spmask;
		uint16_t	holemask;

		/*
		 * Convert the 64-bit in-core sparse inode state to the
		 * 16-bit on-disk holemask.
		 */
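		/*
		 * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT
		 * inodes (64 inodes / 16 bits = 4).  For illustration, an
		 * ir_sparse value of 0xF (the first four inodes sparse)
		 * matches spmask on the first iteration only, so bit 0 of
		 * the holemask is set and the rest stay clear, yielding an
		 * on-disk holemask of 0x0001.
		 */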
		holemask = 0;
		spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
		sparse = ino_rec->ir_sparse;
		for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
			if (sparse & spmask) {
				ASSERT((sparse & spmask) == spmask);
				holemask |= (1 << k);
			} else
				ASSERT((sparse & spmask) == 0);
			sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
		}

		irec->ir_holemask = holemask;
	} else {
		irec->ir_holemask = 0;
	}

	if (btr->first_agino == NULLAGINO)
		btr->first_agino = ino_rec->ino_startnum;
	btr->freecount += finocnt;
	btr->count += inocnt;
	return 0;
}

/* Initialize both inode btree cursors as needed. */
void
init_ino_cursors(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	unsigned int		free_space,
	uint64_t		*num_inos,
	uint64_t		*num_free_inos,
	struct bt_rebuild	*btr_ino,
	struct bt_rebuild	*btr_fino)
{
	struct ino_tree_node	*ino_rec;
	unsigned int		ino_recs = 0;
	unsigned int		fino_recs = 0;
	bool			finobt;
	int			error;

	finobt = xfs_sb_version_hasfinobt(&sc->mp->m_sb);
	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, free_space, btr_ino);

	/* Compute inode statistics. */
	*num_free_inos = 0;
	*num_inos = 0;
	for (ino_rec = findfirst_inode_rec(agno);
	     ino_rec != NULL;
	     ino_rec = next_ino_rec(ino_rec)) {
		unsigned int	rec_ninos = 0;
		unsigned int	rec_nfinos = 0;
		int		i;

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * Sparse inodes are not factored into superblock
			 * (free) inode counts.
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		*num_free_inos += rec_nfinos;
		*num_inos += rec_ninos;
		ino_recs++;

		/* finobt only considers records with free inodes */
		if (rec_nfinos)
			fino_recs++;
	}

	btr_ino->cur = libxfs_inobt_stage_cursor(sc->mp, &btr_ino->newbt.afake,
			agno, XFS_BTNUM_INO);

	btr_ino->bload.get_record = get_inobt_record;
	btr_ino->bload.claim_block = rebuild_claim_block;
	btr_ino->first_agino = NULLAGINO;

	/* Compute how many inobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_ino->cur,
			&btr_ino->bload, ino_recs);
	if (error)
		do_error(
_("Unable to compute inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_ino, btr_ino->bload.nr_blocks);

	if (!finobt)
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, free_space, btr_fino);
	btr_fino->cur = libxfs_inobt_stage_cursor(sc->mp,
			&btr_fino->newbt.afake, agno, XFS_BTNUM_FINO);

	btr_fino->bload.get_record = get_inobt_record;
	btr_fino->bload.claim_block = rebuild_claim_block;
	btr_fino->first_agino = NULLAGINO;

	/* Compute how many finobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_fino->cur,
			&btr_fino->bload, fino_recs);
	if (error)
		do_error(
_("Unable to compute free inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_fino, btr_fino->bload.nr_blocks);
}

/* Rebuild the inode btrees. */
void
build_inode_btrees(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr_ino,
	struct bt_rebuild	*btr_fino)
{
	int			error;

	/* Add all observed inobt records. */
	error = -libxfs_btree_bload(btr_ino->cur, &btr_ino->bload, btr_ino);
	if (error)
		do_error(
_("Error %d while creating inobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_ino->cur, 0);

	if (!xfs_sb_version_hasfinobt(&sc->mp->m_sb))
		return;

	/* Add all observed finobt records. */
	error = -libxfs_btree_bload(btr_fino->cur, &btr_fino->bload, btr_fino);
	if (error)
		do_error(
_("Error %d while creating finobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_fino->cur, 0);
}

/* Rebuild the rmap tree. */

/* Grab one rmap record. */
static int
get_rmapbt_record(
	struct xfs_btree_cur	*cur,
	void			*priv)
{
	struct xfs_rmap_irec	*rec;
	struct bt_rebuild	*btr = priv;

	rec = pop_slab_cursor(btr->slab_cursor);
	memcpy(&cur->bc_rec.r, rec, sizeof(struct xfs_rmap_irec));
	return 0;
}

/* Set up the rmap rebuild parameters. */
void
init_rmapbt_cursor(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	unsigned int		free_space,
	struct bt_rebuild	*btr)
{
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr);
	btr->cur = libxfs_rmapbt_stage_cursor(sc->mp, &btr->newbt.afake, agno);

	btr->bload.get_record = get_rmapbt_record;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			rmap_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute rmap btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild an rmap btree. */
void
build_rmap_tree(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr)
{
	int			error;

	error = rmap_init_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct rmap cursor.\n"));

	/* Add all observed rmap records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating rmap btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}

/* Rebuild the refcount tree. */

/* Grab one refcount record. */
static int
get_refcountbt_record(
	struct xfs_btree_cur	*cur,
	void			*priv)
{
	struct xfs_refcount_irec	*rec;
	struct bt_rebuild	*btr = priv;

	rec = pop_slab_cursor(btr->slab_cursor);
	memcpy(&cur->bc_rec.rc, rec, sizeof(struct xfs_refcount_irec));
	return 0;
}

/* Set up the refcount rebuild parameters. */
void
init_refc_cursor(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	unsigned int		free_space,
	struct bt_rebuild	*btr)
{
	int			error;

	if (!xfs_sb_version_hasreflink(&sc->mp->m_sb))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_REFC, free_space, btr);
	btr->cur = libxfs_refcountbt_stage_cursor(sc->mp, &btr->newbt.afake,
			agno);

	btr->bload.get_record = get_refcountbt_record;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			refcount_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute refcount btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild a refcount btree. */
void
build_refcount_tree(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr)
{
	int			error;

	error = init_refcount_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct refcount cursor.\n"));

	/* Add all observed refcount records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating refcount btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}