// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <libxfs.h>
#include "err_protos.h"
#include "libfrog/bitmap.h"
#include "slab.h"
#include "rmap.h"
#include "incore.h"
#include "bulkload.h"
#include "agbtree.h"

/* Initialize a btree rebuild context. */
static void
init_rebuild(
	struct repair_ctx *sc,
	const struct xfs_owner_info *oinfo,
	xfs_agblock_t est_agfreeblocks,
	struct bt_rebuild *btr)
{
	memset(btr, 0, sizeof(struct bt_rebuild));

	bulkload_init_ag(&btr->newbt, sc, oinfo);
	bulkload_estimate_ag_slack(sc, &btr->bload, est_agfreeblocks);
}

/*
 * Update this free space record to reflect the blocks we stole from the
 * beginning of the record.
 */
static void
consume_freespace(
	xfs_agnumber_t agno,
	struct extent_tree_node *ext_ptr,
	uint32_t len)
{
	struct extent_tree_node *bno_ext_ptr;
	xfs_agblock_t new_start = ext_ptr->ex_startblock + len;
	xfs_extlen_t new_len = ext_ptr->ex_blockcount - len;

	/* Delete the used-up extent from both extent trees. */
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "releasing extent: %u [%u %u]\n", agno,
			ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif
	bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
	ASSERT(bno_ext_ptr != NULL);
	get_bno_extent(agno, bno_ext_ptr);
	release_extent_tree_node(bno_ext_ptr);

	ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
			ext_ptr->ex_blockcount);
	release_extent_tree_node(ext_ptr);

	/*
	 * If we only used part of this last extent, then we must reinsert the
	 * extent to maintain proper sorting order.
	 */
	if (new_len > 0) {
		add_bno_extent(agno, new_start, new_len);
		add_bcnt_extent(agno, new_start, new_len);
	}
}

/*
 * Reserve blocks for the new per-AG structures. Returns true if all blocks
 * were allocated, and false if we ran out of space.
 */
static bool
reserve_agblocks(
	struct xfs_mount *mp,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr,
	uint32_t nr_blocks)
{
	struct extent_tree_node *ext_ptr;
	uint32_t blocks_allocated = 0;
	uint32_t len;
	int error;

	while (blocks_allocated < nr_blocks) {
		xfs_fsblock_t fsbno;

		/*
		 * Grab the smallest extent and use it up, then get the
		 * next smallest. This mimics the init_*_cursor code.
		 */
		ext_ptr = findfirst_bcnt_extent(agno);
		if (!ext_ptr)
			break;

		/* Use up the extent we've got. */
		len = min(ext_ptr->ex_blockcount, nr_blocks - blocks_allocated);
		fsbno = XFS_AGB_TO_FSB(mp, agno, ext_ptr->ex_startblock);
		error = bulkload_add_blocks(&btr->newbt, fsbno, len);
		if (error)
			do_error(_("could not set up btree reservation: %s\n"),
				strerror(-error));

		error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, len,
				btr->newbt.oinfo.oi_owner);
		if (error)
			do_error(_("could not set up btree rmaps: %s\n"),
				strerror(-error));

		consume_freespace(agno, ext_ptr, len);
		blocks_allocated += len;
	}
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "blocks_allocated = %u\n",
			blocks_allocated);
#endif
	return blocks_allocated == nr_blocks;
}

static inline void
reserve_btblocks(
	struct xfs_mount *mp,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr,
	uint32_t nr_blocks)
{
	if (!reserve_agblocks(mp, agno, btr, nr_blocks))
		do_error(
_("error - not enough free space in filesystem, AG %u\n"),
			agno);
}

/* Feed one of the new btree blocks to the bulk loader. */
static int
rebuild_claim_block(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *ptr,
	void *priv)
{
	struct bt_rebuild *btr = priv;

	return bulkload_claim_block(cur, &btr->newbt, ptr);
}
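
/*
 * Note: the bulk loader calls this back once for each block it writes into
 * the new btree, and bulkload_claim_block hands it the next unused block
 * from the reservations staged by reserve_agblocks, so no new allocations
 * happen while the tree is being assembled.
 */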

/*
 * Scoop up leftovers from a rebuild cursor for later freeing, then free the
 * rebuild context.
 */
void
finish_rebuild(
	struct xfs_mount *mp,
	struct bt_rebuild *btr,
	struct bitmap *lost_blocks)
{
	struct bulkload_resv *resv, *n;
	int error;

	for_each_bulkload_reservation(&btr->newbt, resv, n) {
		if (resv->used == resv->len)
			continue;

		error = bitmap_set(lost_blocks, resv->fsbno + resv->used,
				resv->len - resv->used);
		if (error)
			do_error(
_("Insufficient memory saving lost blocks, err=%d.\n"), error);
		resv->used = resv->len;
	}

	bulkload_destroy(&btr->newbt, 0);
}

/*
 * Free Space Btrees
 *
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL. This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened). If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then we fail, and the filesystem will fail with the same
 * failure at runtime. Hence leave a couple of records slack space in
 * each block to allow immediate modification of the tree without
 * requiring splits to be done.
 */

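/*
 * For example: shortening one long free extent while filling the AGFL
 * deletes that extent's record from the by-count tree and re-inserts a
 * smaller one. With a couple of free record slots in each leaf, the
 * re-insert lands in place; with none, it would split a leaf and demand
 * a free list block before the free list exists.
 */
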
/*
 * Return the next free space extent tree record from the previous value we
 * saw.
 */
static inline struct extent_tree_node *
get_bno_rec(
	struct xfs_btree_cur *cur,
	struct extent_tree_node *prev_value)
{
	xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		if (!prev_value)
			return findfirst_bno_extent(agno);
		return findnext_bno_extent(prev_value);
	}

	/* cnt btree */
	if (!prev_value)
		return findfirst_bcnt_extent(agno);
	return findnext_bcnt_extent(agno, prev_value);
}

/* Grab one bnobt record and put it in the btree cursor. */
static int
get_bnobt_record(
	struct xfs_btree_cur *cur,
	void *priv)
{
	struct bt_rebuild *btr = priv;
	struct xfs_alloc_rec_incore *arec = &cur->bc_rec.a;

	btr->bno_rec = get_bno_rec(cur, btr->bno_rec);
	arec->ar_startblock = btr->bno_rec->ex_startblock;
	arec->ar_blockcount = btr->bno_rec->ex_blockcount;
	btr->freeblks += btr->bno_rec->ex_blockcount;
	return 0;
}
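
/*
 * Both the bnobt and cntbt bulk loads use this callback; get_bno_rec
 * checks bc_btnum so that each cursor walks the extent tree that matches
 * its btree's sort order.
 */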

void
init_freespace_cursors(
	struct repair_ctx *sc,
	struct xfs_perag *pag,
	unsigned int est_agfreeblocks,
	unsigned int *nr_extents,
	int *extra_blocks,
	struct bt_rebuild *btr_bno,
	struct bt_rebuild *btr_cnt)
{
	xfs_agnumber_t agno = pag->pag_agno;
	unsigned int agfl_goal;
	int error;

	agfl_goal = libxfs_alloc_min_freelist(sc->mp, NULL);

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr_bno);
	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr_cnt);

	btr_bno->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_bno->newbt.afake, pag, XFS_BTNUM_BNO);
	btr_cnt->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_cnt->newbt.afake, pag, XFS_BTNUM_CNT);

	btr_bno->bload.get_record = get_bnobt_record;
	btr_bno->bload.claim_block = rebuild_claim_block;

	btr_cnt->bload.get_record = get_bnobt_record;
	btr_cnt->bload.claim_block = rebuild_claim_block;

	/*
	 * Now we need to allocate blocks for the free space btrees using the
	 * free space records we're about to put in them. Every record we use
	 * can change the shape of the free space trees, so we recompute the
	 * btree shape until we stop needing /more/ blocks. If we have any
	 * left over we'll stash them in the AGFL when we're done.
	 */
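	/*
	 * Example: if the first pass says the bnobt needs five blocks,
	 * reserving those five blocks consumes free space records, which
	 * can change both the record count and the tree geometry computed
	 * on the next pass.
	 */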
	do {
		unsigned int num_freeblocks;
		int delta_bno, delta_cnt;
		int agfl_wanted;

		/* Compute how many bnobt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_bno->cur,
				&btr_bno->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by block btree geometry, error %d.\n"), error);

		/* Compute how many cntbt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_cnt->cur,
				&btr_cnt->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by length btree geometry, error %d.\n"), error);

		/*
		 * Compute the deficit between the number of blocks reserved
		 * and the number of blocks we think we need for the btree.
		 */
		delta_bno = (int)btr_bno->newbt.nr_reserved -
				btr_bno->bload.nr_blocks;
		delta_cnt = (int)btr_cnt->newbt.nr_reserved -
				btr_cnt->bload.nr_blocks;

		/*
		 * We don't need any more blocks and we've met the AGFL
		 * stocking goal, so we're done.
		 */
		if (delta_bno >= 0 && delta_cnt >= 0 &&
		    delta_bno + delta_cnt >= agfl_goal) {
			*extra_blocks = delta_bno + delta_cnt;
			break;
		}

		/* Allocate however many more blocks we need this time. */
		if (delta_bno < 0) {
			reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno);
			delta_bno = 0;
		}
		if (delta_cnt < 0) {
			reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt);
			delta_cnt = 0;
		}

		/*
		 * Try to fill the bnobt cursor with extra blocks to populate
		 * the AGFL. If we don't get all the blocks we want, stop
		 * trying to fill the AGFL because the AG is totally out of
		 * space.
		 */
		agfl_wanted = agfl_goal - (delta_bno + delta_cnt);
		if (agfl_wanted > 0 &&
		    !reserve_agblocks(sc->mp, agno, btr_bno, agfl_wanted))
			agfl_goal = 0;

		/* Ok, now how many free space records do we have? */
		*nr_extents = count_bno_extents_blocks(agno, &num_freeblocks);
	} while (1);
}

/* Rebuild the free space btrees. */
void
build_freespace_btrees(
	struct repair_ctx *sc,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr_bno,
	struct bt_rebuild *btr_cnt)
{
	int error;

	/* Add all observed bnobt records. */
	error = -libxfs_btree_bload(btr_bno->cur, &btr_bno->bload, btr_bno);
	if (error)
		do_error(
_("Error %d while creating bnobt btree for AG %u.\n"), error, agno);

	/* Add all observed cntbt records. */
	error = -libxfs_btree_bload(btr_cnt->cur, &btr_cnt->bload, btr_cnt);
	if (error)
		do_error(
_("Error %d while creating cntbt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_bno->cur, 0);
	libxfs_btree_del_cursor(btr_cnt->cur, 0);
}

/* Inode Btrees */

static inline struct ino_tree_node *
get_ino_rec(
	struct xfs_btree_cur *cur,
	struct ino_tree_node *prev_value)
{
	xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		if (!prev_value)
			return findfirst_inode_rec(agno);
		return next_ino_rec(prev_value);
	}

	/* finobt */
	if (!prev_value)
		return findfirst_free_inode_rec(agno);
	return next_free_ino_rec(prev_value);
}

/* Grab one inobt record. */
static int
get_inobt_record(
	struct xfs_btree_cur *cur,
	void *priv)
{
	struct bt_rebuild *btr = priv;
	struct xfs_inobt_rec_incore *irec = &cur->bc_rec.i;
	struct ino_tree_node *ino_rec;
	int inocnt = 0;
	int finocnt = 0;
	int k;

	btr->ino_rec = ino_rec = get_ino_rec(cur, btr->ino_rec);

	/* Transform the incore record into an on-disk record. */
	irec->ir_startino = ino_rec->ino_startnum;
	irec->ir_free = ino_rec->ir_free;

	for (k = 0; k < sizeof(xfs_inofree_t) * NBBY; k++) {
		ASSERT(is_inode_confirmed(ino_rec, k));

		if (is_inode_sparse(ino_rec, k))
			continue;
		if (is_inode_free(ino_rec, k))
			finocnt++;
		inocnt++;
	}

	irec->ir_count = inocnt;
	irec->ir_freecount = finocnt;

	if (xfs_has_sparseinodes(cur->bc_mp)) {
		uint64_t sparse;
		int spmask;
		uint16_t holemask;

		/*
		 * Convert the 64-bit in-core sparse inode state to the
		 * 16-bit on-disk holemask.
		 */
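		/*
		 * (XFS_INODES_PER_HOLEMASK_BIT is 4: 64 inodes per chunk
		 * mapped onto 16 holemask bits, so each set holemask bit
		 * marks four consecutive inodes as sparse.)
		 */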
		holemask = 0;
		spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
		sparse = ino_rec->ir_sparse;
		for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
			if (sparse & spmask) {
				ASSERT((sparse & spmask) == spmask);
				holemask |= (1 << k);
			} else
				ASSERT((sparse & spmask) == 0);
			sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
		}

		irec->ir_holemask = holemask;
	} else {
		irec->ir_holemask = 0;
	}

	if (btr->first_agino == NULLAGINO)
		btr->first_agino = ino_rec->ino_startnum;
	btr->freecount += finocnt;
	btr->count += inocnt;
	return 0;
}

/* Initialize both inode btree cursors as needed. */
void
init_ino_cursors(
	struct repair_ctx *sc,
	struct xfs_perag *pag,
	unsigned int est_agfreeblocks,
	uint64_t *num_inos,
	uint64_t *num_free_inos,
	struct bt_rebuild *btr_ino,
	struct bt_rebuild *btr_fino)
{
	struct ino_tree_node *ino_rec;
	xfs_agnumber_t agno = pag->pag_agno;
	unsigned int ino_recs = 0;
	unsigned int fino_recs = 0;
	bool finobt;
	int error;

	finobt = xfs_has_finobt(sc->mp);
	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, est_agfreeblocks, btr_ino);

	/* Compute inode statistics. */
	*num_free_inos = 0;
	*num_inos = 0;
	for (ino_rec = findfirst_inode_rec(agno);
	     ino_rec != NULL;
	     ino_rec = next_ino_rec(ino_rec)) {
		unsigned int rec_ninos = 0;
		unsigned int rec_nfinos = 0;
		int i;

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		*num_free_inos += rec_nfinos;
		*num_inos += rec_ninos;
		ino_recs++;

		/* finobt only considers records with free inodes */
		if (rec_nfinos)
			fino_recs++;
	}

	btr_ino->cur = libxfs_inobt_stage_cursor(pag, &btr_ino->newbt.afake,
			XFS_BTNUM_INO);

	btr_ino->bload.get_record = get_inobt_record;
	btr_ino->bload.claim_block = rebuild_claim_block;
	btr_ino->first_agino = NULLAGINO;

	/* Compute how many inobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_ino->cur,
			&btr_ino->bload, ino_recs);
	if (error)
		do_error(
_("Unable to compute inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_ino, btr_ino->bload.nr_blocks);

	if (!finobt)
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, est_agfreeblocks, btr_fino);
	btr_fino->cur = libxfs_inobt_stage_cursor(pag,
			&btr_fino->newbt.afake, XFS_BTNUM_FINO);

	btr_fino->bload.get_record = get_inobt_record;
	btr_fino->bload.claim_block = rebuild_claim_block;
	btr_fino->first_agino = NULLAGINO;

	/* Compute how many finobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_fino->cur,
			&btr_fino->bload, fino_recs);
	if (error)
		do_error(
_("Unable to compute free inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_fino, btr_fino->bload.nr_blocks);
}

/* Rebuild the inode btrees. */
void
build_inode_btrees(
	struct repair_ctx *sc,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr_ino,
	struct bt_rebuild *btr_fino)
{
	int error;

	/* Add all observed inobt records. */
	error = -libxfs_btree_bload(btr_ino->cur, &btr_ino->bload, btr_ino);
	if (error)
		do_error(
_("Error %d while creating inobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_ino->cur, 0);

	if (!xfs_has_finobt(sc->mp))
		return;

	/* Add all observed finobt records. */
	error = -libxfs_btree_bload(btr_fino->cur, &btr_fino->bload, btr_fino);
	if (error)
		do_error(
_("Error %d while creating finobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_fino->cur, 0);
}

/* rebuild the rmap tree */

/* Grab one rmap record. */
static int
get_rmapbt_record(
	struct xfs_btree_cur *cur,
	void *priv)
{
	struct xfs_rmap_irec *rec;
	struct bt_rebuild *btr = priv;

	rec = pop_slab_cursor(btr->slab_cursor);
	memcpy(&cur->bc_rec.r, rec, sizeof(struct xfs_rmap_irec));
	return 0;
}
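
/*
 * The bulk loader writes records in exactly the order this callback
 * returns them, so the slab cursor must hand back rmap records already
 * sorted in key order.
 */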

/* Set up the rmap rebuild parameters. */
void
init_rmapbt_cursor(
	struct repair_ctx *sc,
	struct xfs_perag *pag,
	unsigned int est_agfreeblocks,
	struct bt_rebuild *btr)
{
	xfs_agnumber_t agno = pag->pag_agno;
	int error;

	if (!xfs_has_rmapbt(sc->mp))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr);
	btr->cur = libxfs_rmapbt_stage_cursor(sc->mp, &btr->newbt.afake, pag);

	btr->bload.get_record = get_rmapbt_record;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			rmap_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute rmap btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild the rmap btree. */
void
build_rmap_tree(
	struct repair_ctx *sc,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr)
{
	int error;

	error = rmap_init_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct rmap cursor.\n"));

	/* Add all observed rmap records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating rmap btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}

/* rebuild the refcount tree */

/* Grab one refcount record. */
static int
get_refcountbt_record(
	struct xfs_btree_cur *cur,
	void *priv)
{
	struct xfs_refcount_irec *rec;
	struct bt_rebuild *btr = priv;

	rec = pop_slab_cursor(btr->slab_cursor);
	memcpy(&cur->bc_rec.rc, rec, sizeof(struct xfs_refcount_irec));
	return 0;
}

/* Set up the refcount rebuild parameters. */
void
init_refc_cursor(
	struct repair_ctx *sc,
	struct xfs_perag *pag,
	unsigned int est_agfreeblocks,
	struct bt_rebuild *btr)
{
	xfs_agnumber_t agno = pag->pag_agno;
	int error;

	if (!xfs_has_reflink(sc->mp))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_REFC, est_agfreeblocks, btr);
	btr->cur = libxfs_refcountbt_stage_cursor(sc->mp, &btr->newbt.afake,
			pag);

	btr->bload.get_record = get_refcountbt_record;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			refcount_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute refcount btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild the refcount btree. */
void
build_refcount_tree(
	struct repair_ctx *sc,
	xfs_agnumber_t agno,
	struct bt_rebuild *btr)
{
	int error;

	error = init_refcount_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct refcount cursor.\n"));

	/* Add all observed refcount records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating refcount btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}

static xfs_extlen_t
estimate_allocbt_blocks(
	struct xfs_perag *pag,
	unsigned int nr_extents)
{
	/* Account for space consumed by both free space btrees */
	return libxfs_allocbt_calc_size(pag->pag_mount, nr_extents) * 2;
}

static xfs_extlen_t
estimate_inobt_blocks(
	struct xfs_perag *pag)
{
	struct ino_tree_node *ino_rec;
	xfs_agnumber_t agno = pag->pag_agno;
	unsigned int ino_recs = 0;
	unsigned int fino_recs = 0;
	xfs_extlen_t ret;

	for (ino_rec = findfirst_inode_rec(agno);
	     ino_rec != NULL;
	     ino_rec = next_ino_rec(ino_rec)) {
		unsigned int rec_nfinos = 0;
		int i;

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
		}

		ino_recs++;

		/* finobt only considers records with free inodes */
		if (rec_nfinos)
			fino_recs++;
	}

	ret = libxfs_iallocbt_calc_size(pag->pag_mount, ino_recs);
	if (xfs_has_finobt(pag->pag_mount))
		ret += libxfs_iallocbt_calc_size(pag->pag_mount, fino_recs);
	return ret;
}

/* Estimate the size of the per-AG btrees. */
xfs_extlen_t
estimate_agbtree_blocks(
	struct xfs_perag *pag,
	unsigned int free_extents)
{
	unsigned int ret = 0;

	ret += estimate_allocbt_blocks(pag, free_extents);
	ret += estimate_inobt_blocks(pag);
	ret += estimate_rmapbt_blocks(pag);
	ret += estimate_refcountbt_blocks(pag);

	return ret;
}