// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <libxfs.h>
#include "err_protos.h"
#include "libfrog/bitmap.h"
#include "slab.h"
#include "rmap.h"
#include "incore.h"
#include "bulkload.h"
#include "agbtree.h"

/* Initialize a btree rebuild context. */
static void
init_rebuild(
	struct repair_ctx	*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_agblock_t		est_agfreeblocks,
	struct bt_rebuild	*btr)
{
	memset(btr, 0, sizeof(struct bt_rebuild));

	bulkload_init_ag(&btr->newbt, sc, oinfo);
	btr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
	bulkload_estimate_ag_slack(sc, &btr->bload, est_agfreeblocks);
}

/*
 * Update this free space record to reflect the blocks we stole from the
 * beginning of the record.
 */
static void
consume_freespace(
	xfs_agnumber_t		agno,
	struct extent_tree_node	*ext_ptr,
	uint32_t		len)
{
	struct extent_tree_node	*bno_ext_ptr;
	xfs_agblock_t		new_start = ext_ptr->ex_startblock + len;
	xfs_extlen_t		new_len = ext_ptr->ex_blockcount - len;

	/* Delete the used-up extent from both extent trees. */
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "releasing extent: %u [%u %u]\n", agno,
			ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif
	bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
	ASSERT(bno_ext_ptr != NULL);
	get_bno_extent(agno, bno_ext_ptr);
	release_extent_tree_node(bno_ext_ptr);

	ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
			ext_ptr->ex_blockcount);
	release_extent_tree_node(ext_ptr);

	/*
	 * If we only used part of this last extent, then we must reinsert the
	 * extent to maintain proper sorting order.
	 */
	if (new_len > 0) {
		add_bno_extent(agno, new_start, new_len);
		add_bcnt_extent(agno, new_start, new_len);
	}
}

/*
 * Reserve blocks for the new per-AG structures. Returns true if all blocks
 * were allocated, and false if we ran out of space.
 */
static bool
reserve_agblocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr,
	uint32_t		nr_blocks)
{
	struct extent_tree_node	*ext_ptr;
	struct xfs_perag	*pag;
	uint32_t		blocks_allocated = 0;
	uint32_t		len;
	int			error;

	pag = libxfs_perag_get(mp, agno);
	if (!pag)
		do_error(_("could not open perag structure for agno 0x%x\n"),
				agno);

	while (blocks_allocated < nr_blocks) {
		/*
		 * Grab the smallest extent and use it up, then get the
		 * next smallest. This mimics the init_*_cursor code.
		 */
		ext_ptr = findfirst_bcnt_extent(agno);
		if (!ext_ptr)
			break;

		/* Use up the extent we've got. */
		len = min(ext_ptr->ex_blockcount, nr_blocks - blocks_allocated);
		error = bulkload_add_extent(&btr->newbt, pag,
				ext_ptr->ex_startblock, len);
		if (error)
			do_error(_("could not set up btree reservation: %s\n"),
				strerror(-error));

		error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, len,
				btr->newbt.oinfo.oi_owner);
		if (error)
			do_error(_("could not set up btree rmaps: %s\n"),
				strerror(-error));

		consume_freespace(agno, ext_ptr, len);
		blocks_allocated += len;
	}
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "blocks_allocated = %d\n",
			blocks_allocated);
#endif
	libxfs_perag_put(pag);
	return blocks_allocated == nr_blocks;
}

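/* Reserve blocks for a new btree, aborting repair if we run out of space. */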
static inline void
reserve_btblocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr,
	uint32_t		nr_blocks)
{
	if (!reserve_agblocks(mp, agno, btr, nr_blocks))
		do_error(
_("error - not enough free space in filesystem, AG %u\n"),
				agno);
}

/* Feed one of the new btree blocks to the bulk loader. */
static int
rebuild_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;

	return bulkload_claim_block(cur, &btr->newbt, ptr);
}

/*
 * Scoop up leftovers from a rebuild cursor for later freeing, then free the
 * rebuild context.
 */
void
finish_rebuild(
	struct xfs_mount	*mp,
	struct bt_rebuild	*btr,
	struct bitmap		*lost_blocks)
{
	struct bulkload_resv	*resv, *n;
	int			error;

	for_each_bulkload_reservation(&btr->newbt, resv, n) {
		xfs_fsblock_t	fsbno;

		if (resv->used == resv->len)
			continue;

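		/* Stash the unused blocks so they can be freed later. */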
		fsbno = XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
				resv->agbno + resv->used);
		error = bitmap_set(lost_blocks, fsbno, resv->len - resv->used);
		if (error)
			do_error(
_("Insufficient memory saving lost blocks, err=%d.\n"), error);
		resv->used = resv->len;
	}

	bulkload_commit(&btr->newbt);
}

/*
 * Free Space Btrees
 *
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL. This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened). If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then we fail, and the filesystem will fail with the same
 * failure at runtime. Hence leave a couple of records slack space in
 * each block to allow immediate modification of the tree without
 * requiring splits to be done.
 */

/*
 * Return the next free space extent tree record from the previous value we
 * saw.
 */
static inline struct extent_tree_node *
get_bno_rec(
	struct xfs_btree_cur	*cur,
	struct extent_tree_node	*prev_value)
{
	xfs_agnumber_t		agno = cur->bc_ag.pag->pag_agno;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		if (!prev_value)
			return findfirst_bno_extent(agno);
		return findnext_bno_extent(prev_value);
	}

	/* cnt btree */
	if (!prev_value)
		return findfirst_bcnt_extent(agno);
	return findnext_bcnt_extent(agno, prev_value);
}

/* Grab bnobt records and stage them in the new btree block. */
static int
get_bnobt_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;
	struct xfs_alloc_rec_incore *arec = &cur->bc_rec.a;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		btr->bno_rec = get_bno_rec(cur, btr->bno_rec);
		arec->ar_startblock = btr->bno_rec->ex_startblock;
		arec->ar_blockcount = btr->bno_rec->ex_blockcount;
		btr->freeblks += btr->bno_rec->ex_blockcount;

		block_rec = libxfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

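/*
 * Set up the free space btree cursors and reserve all the blocks needed to
 * rebuild both btrees, plus enough extra blocks to populate the AGFL.
 */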
void
init_freespace_cursors(
	struct repair_ctx	*sc,
	struct xfs_perag	*pag,
	unsigned int		est_agfreeblocks,
	unsigned int		*nr_extents,
	int			*extra_blocks,
	struct bt_rebuild	*btr_bno,
	struct bt_rebuild	*btr_cnt)
{
	xfs_agnumber_t		agno = pag->pag_agno;
	unsigned int		agfl_goal;
	int			error;

	agfl_goal = libxfs_alloc_min_freelist(sc->mp, NULL);

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr_bno);
	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr_cnt);

	btr_bno->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_bno->newbt.afake, pag, XFS_BTNUM_BNO);
	btr_cnt->cur = libxfs_allocbt_stage_cursor(sc->mp,
			&btr_cnt->newbt.afake, pag, XFS_BTNUM_CNT);

	btr_bno->bload.get_records = get_bnobt_records;
	btr_bno->bload.claim_block = rebuild_claim_block;

	btr_cnt->bload.get_records = get_bnobt_records;
	btr_cnt->bload.claim_block = rebuild_claim_block;

	/*
	 * Now we need to allocate blocks for the free space btrees using the
	 * free space records we're about to put in them. Every record we use
	 * can change the shape of the free space trees, so we recompute the
	 * btree shape until we stop needing /more/ blocks. If we have any
	 * left over we'll stash them in the AGFL when we're done.
	 */
	do {
		unsigned int	num_freeblocks;
		int		delta_bno, delta_cnt;
		int		agfl_wanted;

		/* Compute how many bnobt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_bno->cur,
				&btr_bno->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by block btree geometry, error %d.\n"), -error);

		/* Compute how many cntbt blocks we'll need. */
		error = -libxfs_btree_bload_compute_geometry(btr_cnt->cur,
				&btr_cnt->bload, *nr_extents);
		if (error)
			do_error(
_("Unable to compute free space by length btree geometry, error %d.\n"), -error);

		/*
		 * Compute the deficit between the number of blocks reserved
		 * and the number of blocks we think we need for the btree.
		 */
		delta_bno = (int)btr_bno->newbt.nr_reserved -
				btr_bno->bload.nr_blocks;
		delta_cnt = (int)btr_cnt->newbt.nr_reserved -
				btr_cnt->bload.nr_blocks;

		/* We don't need any more blocks, so we're done. */
		if (delta_bno >= 0 && delta_cnt >= 0 &&
		    delta_bno + delta_cnt >= agfl_goal) {
			*extra_blocks = delta_bno + delta_cnt;
			break;
		}

		/* Allocate however many more blocks we need this time. */
		if (delta_bno < 0) {
			reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno);
			delta_bno = 0;
		}
		if (delta_cnt < 0) {
			reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt);
			delta_cnt = 0;
		}

		/*
		 * Try to fill the bnobt cursor with extra blocks to populate
		 * the AGFL. If we don't get all the blocks we want, stop
		 * trying to fill the AGFL because the AG is totally out of
		 * space.
		 */
		agfl_wanted = agfl_goal - (delta_bno + delta_cnt);
		if (agfl_wanted > 0 &&
		    !reserve_agblocks(sc->mp, agno, btr_bno, agfl_wanted))
			agfl_goal = 0;

		/* Ok, now how many free space records do we have? */
		*nr_extents = count_bno_extents_blocks(agno, &num_freeblocks);
	} while (1);
}

/* Rebuild the free space btrees. */
void
build_freespace_btrees(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr_bno,
	struct bt_rebuild	*btr_cnt)
{
	int			error;

	/* Add all observed bnobt records. */
	error = -libxfs_btree_bload(btr_bno->cur, &btr_bno->bload, btr_bno);
	if (error)
		do_error(
_("Error %d while creating bnobt btree for AG %u.\n"), error, agno);

	/* Add all observed cntbt records. */
	error = -libxfs_btree_bload(btr_cnt->cur, &btr_cnt->bload, btr_cnt);
	if (error)
		do_error(
_("Error %d while creating cntbt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_bno->cur, 0);
	libxfs_btree_del_cursor(btr_cnt->cur, 0);
}

/* Inode Btrees */

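/* Return the next inode btree record from the previous value we saw. */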
static inline struct ino_tree_node *
get_ino_rec(
	struct xfs_btree_cur	*cur,
	struct ino_tree_node	*prev_value)
{
	xfs_agnumber_t		agno = cur->bc_ag.pag->pag_agno;

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		if (!prev_value)
			return findfirst_inode_rec(agno);
		return next_ino_rec(prev_value);
	}

	/* finobt */
	if (!prev_value)
		return findfirst_free_inode_rec(agno);
	return next_free_ino_rec(prev_value);
}

/* Grab inobt records and stage them in the new btree block. */
static int
get_inobt_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct bt_rebuild	*btr = priv;
	struct xfs_inobt_rec_incore *irec = &cur->bc_rec.i;
	unsigned int		loaded = 0;

	while (loaded < nr_wanted) {
		struct ino_tree_node	*ino_rec;
		union xfs_btree_rec	*block_rec;
		int			inocnt = 0;
		int			finocnt = 0;
		int			k;

		btr->ino_rec = ino_rec = get_ino_rec(cur, btr->ino_rec);

		/* Transform the incore record into an on-disk record. */
		irec->ir_startino = ino_rec->ino_startnum;
		irec->ir_free = ino_rec->ir_free;

		for (k = 0; k < sizeof(xfs_inofree_t) * NBBY; k++) {
			ASSERT(is_inode_confirmed(ino_rec, k));

			if (is_inode_sparse(ino_rec, k))
				continue;
			if (is_inode_free(ino_rec, k))
				finocnt++;
			inocnt++;
		}

		irec->ir_count = inocnt;
		irec->ir_freecount = finocnt;

		if (xfs_has_sparseinodes(cur->bc_mp)) {
			uint64_t	sparse;
			int		spmask;
			uint16_t	holemask;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			irec->ir_holemask = holemask;
		} else {
			irec->ir_holemask = 0;
		}

		if (btr->first_agino == NULLAGINO)
			btr->first_agino = ino_rec->ino_startnum;
		btr->freecount += finocnt;
		btr->count += inocnt;

		block_rec = libxfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
		loaded++;
		idx++;
	}

	return loaded;
}

/* Initialize both inode btree cursors as needed. */
void
init_ino_cursors(
	struct repair_ctx	*sc,
	struct xfs_perag	*pag,
	unsigned int		est_agfreeblocks,
	uint64_t		*num_inos,
	uint64_t		*num_free_inos,
	struct bt_rebuild	*btr_ino,
	struct bt_rebuild	*btr_fino)
{
	struct ino_tree_node	*ino_rec;
	xfs_agnumber_t		agno = pag->pag_agno;
	unsigned int		ino_recs = 0;
	unsigned int		fino_recs = 0;
	bool			finobt;
	int			error;

	finobt = xfs_has_finobt(sc->mp);
	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, est_agfreeblocks, btr_ino);

	/* Compute inode statistics. */
	*num_free_inos = 0;
	*num_inos = 0;
	for (ino_rec = findfirst_inode_rec(agno);
	     ino_rec != NULL;
	     ino_rec = next_ino_rec(ino_rec)) {
		unsigned int	rec_ninos = 0;
		unsigned int	rec_nfinos = 0;
		int		i;

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		*num_free_inos += rec_nfinos;
		*num_inos += rec_ninos;
		ino_recs++;

		/* finobt only considers records with free inodes */
		if (rec_nfinos)
			fino_recs++;
	}

	btr_ino->cur = libxfs_inobt_stage_cursor(pag, &btr_ino->newbt.afake,
			XFS_BTNUM_INO);

	btr_ino->bload.get_records = get_inobt_records;
	btr_ino->bload.claim_block = rebuild_claim_block;
	btr_ino->first_agino = NULLAGINO;

	/* Compute how many inobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_ino->cur,
			&btr_ino->bload, ino_recs);
	if (error)
		do_error(
_("Unable to compute inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_ino, btr_ino->bload.nr_blocks);

	if (!finobt)
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, est_agfreeblocks, btr_fino);
	btr_fino->cur = libxfs_inobt_stage_cursor(pag,
			&btr_fino->newbt.afake, XFS_BTNUM_FINO);

	btr_fino->bload.get_records = get_inobt_records;
	btr_fino->bload.claim_block = rebuild_claim_block;
	btr_fino->first_agino = NULLAGINO;

	/* Compute how many finobt blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr_fino->cur,
			&btr_fino->bload, fino_recs);
	if (error)
		do_error(
_("Unable to compute free inode btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr_fino, btr_fino->bload.nr_blocks);
}

/* Rebuild the inode btrees. */
void
build_inode_btrees(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr_ino,
	struct bt_rebuild	*btr_fino)
{
	int			error;

	/* Add all observed inobt records. */
	error = -libxfs_btree_bload(btr_ino->cur, &btr_ino->bload, btr_ino);
	if (error)
		do_error(
_("Error %d while creating inobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_ino->cur, 0);

	if (!xfs_has_finobt(sc->mp))
		return;

	/* Add all observed finobt records. */
	error = -libxfs_btree_bload(btr_fino->cur, &btr_fino->bload, btr_fino);
	if (error)
		do_error(
_("Error %d while creating finobt btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGI yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr_fino->cur, 0);
}

/* rebuild the rmap tree */

/* Grab rmap records and stage them in the new btree block. */
static int
get_rmapbt_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xfs_rmap_irec	*rec;
	struct bt_rebuild	*btr = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		rec = pop_slab_cursor(btr->slab_cursor);
		memcpy(&cur->bc_rec.r, rec, sizeof(struct xfs_rmap_irec));

		block_rec = libxfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Set up the rmap rebuild parameters. */
void
init_rmapbt_cursor(
	struct repair_ctx	*sc,
	struct xfs_perag	*pag,
	unsigned int		est_agfreeblocks,
	struct bt_rebuild	*btr)
{
	xfs_agnumber_t		agno = pag->pag_agno;
	int			error;

	if (!xfs_has_rmapbt(sc->mp))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_AG, est_agfreeblocks, btr);
	btr->cur = libxfs_rmapbt_stage_cursor(sc->mp, &btr->newbt.afake, pag);

	btr->bload.get_records = get_rmapbt_records;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			rmap_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute rmap btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild an rmap btree. */
void
build_rmap_tree(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr)
{
	int			error;

	error = rmap_init_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct rmap cursor.\n"));

	/* Add all observed rmap records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating rmap btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}

/* rebuild the refcount tree */

/* Grab refcount records and stage them in the new btree block. */
static int
get_refcountbt_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xfs_refcount_irec	*rec;
	struct bt_rebuild	*btr = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		rec = pop_slab_cursor(btr->slab_cursor);
		memcpy(&cur->bc_rec.rc, rec, sizeof(struct xfs_refcount_irec));

		block_rec = libxfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Set up the refcount rebuild parameters. */
void
init_refc_cursor(
	struct repair_ctx	*sc,
	struct xfs_perag	*pag,
	unsigned int		est_agfreeblocks,
	struct bt_rebuild	*btr)
{
	xfs_agnumber_t		agno = pag->pag_agno;
	int			error;

	if (!xfs_has_reflink(sc->mp))
		return;

	init_rebuild(sc, &XFS_RMAP_OINFO_REFC, est_agfreeblocks, btr);
	btr->cur = libxfs_refcountbt_stage_cursor(sc->mp, &btr->newbt.afake,
			pag);

	btr->bload.get_records = get_refcountbt_records;
	btr->bload.claim_block = rebuild_claim_block;

	/* Compute how many blocks we'll need. */
	error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload,
			refcount_record_count(sc->mp, agno));
	if (error)
		do_error(
_("Unable to compute refcount btree geometry, error %d.\n"), error);

	reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks);
}

/* Rebuild a refcount btree. */
void
build_refcount_tree(
	struct repair_ctx	*sc,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr)
{
	int			error;

	error = init_refcount_cursor(agno, &btr->slab_cursor);
	if (error)
		do_error(
_("Insufficient memory to construct refcount cursor.\n"));

	/* Add all observed refcount records. */
	error = -libxfs_btree_bload(btr->cur, &btr->bload, btr);
	if (error)
		do_error(
_("Error %d while creating refcount btree for AG %u.\n"), error, agno);

	/* Since we're not writing the AGF yet, no need to commit the cursor */
	libxfs_btree_del_cursor(btr->cur, 0);
	free_slab_cursor(&btr->slab_cursor);
}

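/* Estimate the number of blocks needed to rebuild both free space btrees. */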
static xfs_extlen_t
estimate_allocbt_blocks(
	struct xfs_perag	*pag,
	unsigned int		nr_extents)
{
	/* Account for space consumed by both free space btrees */
	return libxfs_allocbt_calc_size(pag->pag_mount, nr_extents) * 2;
}

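/* Estimate the number of blocks needed to rebuild the inode btrees. */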
static xfs_extlen_t
estimate_inobt_blocks(
	struct xfs_perag	*pag)
{
	struct ino_tree_node	*ino_rec;
	xfs_agnumber_t		agno = pag->pag_agno;
	unsigned int		ino_recs = 0;
	unsigned int		fino_recs = 0;
	xfs_extlen_t		ret;

	for (ino_rec = findfirst_inode_rec(agno);
	     ino_rec != NULL;
	     ino_rec = next_ino_rec(ino_rec)) {
		unsigned int	rec_nfinos = 0;
		int		i;

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
		}

		ino_recs++;

		/* finobt only considers records with free inodes */
		if (rec_nfinos)
			fino_recs++;
	}

	ret = libxfs_iallocbt_calc_size(pag->pag_mount, ino_recs);
	if (xfs_has_finobt(pag->pag_mount))
		ret += libxfs_iallocbt_calc_size(pag->pag_mount, fino_recs);
	return ret;
}

/* Estimate the size of the per-AG btrees. */
xfs_extlen_t
estimate_agbtree_blocks(
	struct xfs_perag	*pag,
	unsigned int		free_extents)
{
	unsigned int		ret = 0;

	ret += estimate_allocbt_blocks(pag, free_extents);
	ret += estimate_inobt_blocks(pag);
	ret += estimate_rmapbt_blocks(pag);
	ret += estimate_refcountbt_blocks(pag);

	return ret;
}