// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
#include "incore.h"
#include "protos.h"
#include "err_protos.h"
#include "dinode.h"
#include "rt.h"
#include "versions.h"
#include "threads.h"
#include "progress.h"
#include "slab.h"
#include "rmap.h"

/*
 * we maintain the current slice (path from root to leaf)
 * of the btree incore. when we need a new block, we ask
 * the block allocator for the address of a block on that
 * level, map the block in, and set up the appropriate
 * pointers (child, sibling, etc.) and keys that should
 * point to the new block.
 */
typedef struct bt_stat_level  {
	/*
	 * set in setup_cursor routine and maintained in the tree-building
	 * routines
	 */
	xfs_buf_t		*buf_p;		/* 2 buffer pointers to ... */
	xfs_buf_t		*prev_buf_p;
	xfs_agblock_t		agbno;		/* current block being filled */
	xfs_agblock_t		prev_agbno;	/* previous block */
	/*
	 * set in calculate/init cursor routines for each btree level
	 */
	int			num_recs_tot;	/* # tree recs in level */
	int			num_blocks;	/* # tree blocks in level */
	int			num_recs_pb;	/* num_recs_tot / num_blocks */
	int			modulo;		/* num_recs_tot % num_blocks */
} bt_stat_level_t;
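/*
 * Worked example of the level geometry above (illustrative numbers, not
 * taken from a real filesystem): with num_recs_tot = 1000 records spread
 * over num_blocks = 3 leaf blocks, num_recs_pb = 1000 / 3 = 333 and
 * modulo = 1000 % 3 = 1, so the first block written holds 334 records
 * and the remaining two hold 333 each; the tree builders below decrement
 * modulo once per block until it reaches zero.
 */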

typedef struct bt_status  {
	int			init;		/* cursor set up once? */
	int			num_levels;	/* # of levels in btree */
	xfs_extlen_t		num_tot_blocks;	/* # blocks alloc'ed for tree */
	xfs_extlen_t		num_free_blocks;/* # blocks currently unused */

	xfs_agblock_t		root;		/* root block */
	/*
	 * list of blocks to be used to set up this tree
	 * and pointer to the first unused block on the list
	 */
	xfs_agblock_t		*btree_blocks;		/* block list */
	xfs_agblock_t		*free_btree_blocks;	/* first unused block */
	/*
	 * per-level status info
	 */
	bt_stat_level_t		level[XFS_BTREE_MAXLEVELS];
	uint64_t		owner;		/* owner */
} bt_status_t;

/*
 * extra metadata for the agi
 */
struct agi_stat {
	xfs_agino_t		first_agino;
	xfs_agino_t		count;
	xfs_agino_t		freecount;
};

static uint64_t	*sb_icount_ag;		/* allocated inodes per ag */
static uint64_t	*sb_ifree_ag;		/* free inodes per ag */
static uint64_t	*sb_fdblocks_ag;	/* free data blocks per ag */

static int
mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	int			in_extent;
	int			num_extents;
	xfs_agblock_t		extent_start;
	xfs_extlen_t		extent_len;
	xfs_agblock_t		agbno;
	xfs_agblock_t		ag_end;
	uint			free_blocks;
	xfs_extlen_t		blen;
	int			bstate;

	/*
	 * scan the bitmap for the ag looking for contiguous
	 * extents of free blocks. At this point, we know
	 * that blocks in the bitmap are either set to an
	 * "in use" state or set to unknown (0) since the
	 * bmaps were zero'ed in phase 4 and only blocks
	 * being used by inodes, inode bmaps, ag headers,
	 * and the files themselves were put into the bitmap.
	 */
	ASSERT(agno < mp->m_sb.sb_agcount);

	extent_start = extent_len = 0;
	in_extent = 0;
	num_extents = free_blocks = 0;

	if (agno < mp->m_sb.sb_agcount - 1)
		ag_end = mp->m_sb.sb_agblocks;
	else
		ag_end = mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t)mp->m_sb.sb_agblocks *
			(mp->m_sb.sb_agcount - 1);
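	/*
	 * e.g. (illustrative geometry): with sb_dblocks = 1000,
	 * sb_agblocks = 256 and sb_agcount = 4, the three whole AGs
	 * cover 768 blocks and the last, partial AG covers
	 * 1000 - 256 * 3 = 232 blocks.
	 */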

	/*
	 * ok, now find the number of extents, keep track of the
	 * largest extent.
	 */
	for (agbno = 0; agbno < ag_end; agbno += blen)  {
		bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
		if (bstate < XR_E_INUSE)  {
			free_blocks += blen;
			if (in_extent == 0)  {
				/*
				 * found the start of a free extent
				 */
				in_extent = 1;
				num_extents++;
				extent_start = agbno;
				extent_len = blen;
			} else  {
				extent_len += blen;
			}
		} else  {
			if (in_extent)  {
				/*
				 * free extent ends here, add extent to the
				 * 2 incore extent (avl-to-be-B+) trees
				 */
				in_extent = 0;
#if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
				fprintf(stderr, "adding extent %u [%u %u]\n",
					agno, extent_start, extent_len);
#endif
				add_bno_extent(agno, extent_start, extent_len);
				add_bcnt_extent(agno, extent_start, extent_len);
			}
		}
	}
	if (in_extent)  {
		/*
		 * free extent ends here
		 */
#if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
		fprintf(stderr, "adding extent %u [%u %u]\n",
			agno, extent_start, extent_len);
#endif
		add_bno_extent(agno, extent_start, extent_len);
		add_bcnt_extent(agno, extent_start, extent_len);
	}

	return(num_extents);
}

static xfs_agblock_t
get_next_blockaddr(xfs_agnumber_t agno, int level, bt_status_t *curs)
{
	ASSERT(curs->free_btree_blocks < curs->btree_blocks +
						curs->num_tot_blocks);
	ASSERT(curs->num_free_blocks > 0);

	curs->num_free_blocks--;
	return(*curs->free_btree_blocks++);
}

/*
 * set up the dynamically allocated block allocation data in the btree
 * cursor that depends on the info in the static portion of the cursor.
 * allocates space from the incore bno/bcnt extent trees and sets up
 * the first path up the left side of the tree. Also sets up the
 * cursor pointer to the btree root. called by init_freespace_cursor()
 * and init_ino_cursor()
 */
static void
setup_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *curs)
{
	int			j;
	unsigned int		u;
	xfs_extlen_t		big_extent_len;
	xfs_agblock_t		big_extent_start;
	extent_tree_node_t	*ext_ptr;
	extent_tree_node_t	*bno_ext_ptr;
	xfs_extlen_t		blocks_allocated;
	xfs_agblock_t		*agb_ptr;
	int			error;

	/*
	 * get the number of blocks we need to allocate, then
	 * set up block number array, set the free block pointer
	 * to the first block in the array, and null the array
	 */
	big_extent_len = curs->num_tot_blocks;
	blocks_allocated = 0;

	ASSERT(big_extent_len > 0);

	if ((curs->btree_blocks = malloc(sizeof(xfs_agblock_t)
					* big_extent_len)) == NULL)
		do_error(_("could not set up btree block array\n"));

	agb_ptr = curs->free_btree_blocks = curs->btree_blocks;

	for (j = 0; j < curs->num_free_blocks; j++, agb_ptr++)
		*agb_ptr = NULLAGBLOCK;

	/*
	 * grab the smallest extent and use it up, then get the
	 * next smallest. This mimics the init_*_cursor code.
	 */
	ext_ptr =  findfirst_bcnt_extent(agno);

	agb_ptr = curs->btree_blocks;

	/*
	 * set up the free block array
	 */
	while (blocks_allocated < big_extent_len)  {
		if (!ext_ptr)
			do_error(
	_("error - not enough free space in filesystem\n"));
		/*
		 * use up the extent we've got
		 */
		for (u = 0; u < ext_ptr->ex_blockcount &&
				blocks_allocated < big_extent_len; u++)  {
			ASSERT(agb_ptr < curs->btree_blocks
					+ curs->num_tot_blocks);
			*agb_ptr++ = ext_ptr->ex_startblock + u;
			blocks_allocated++;
		}

		error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, u,
				curs->owner);
		if (error)
			do_error(_("could not set up btree rmaps: %s\n"),
				strerror(-error));

		/*
		 * if we only used part of this last extent, then we
		 * need only to reset the extent in the extent
		 * trees and we're done
		 */
		if (u < ext_ptr->ex_blockcount)  {
			big_extent_start = ext_ptr->ex_startblock + u;
			big_extent_len = ext_ptr->ex_blockcount - u;

			ASSERT(big_extent_len > 0);

			bno_ext_ptr = find_bno_extent(agno,
						ext_ptr->ex_startblock);
			ASSERT(bno_ext_ptr != NULL);
			get_bno_extent(agno, bno_ext_ptr);
			release_extent_tree_node(bno_ext_ptr);

			ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
			release_extent_tree_node(ext_ptr);
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr, "releasing extent: %u [%u %u]\n",
				agno, ext_ptr->ex_startblock,
				ext_ptr->ex_blockcount);
			fprintf(stderr, "blocks_allocated = %d\n",
				blocks_allocated);
#endif

			add_bno_extent(agno, big_extent_start, big_extent_len);
			add_bcnt_extent(agno, big_extent_start, big_extent_len);

			return;
		}
		/*
		 * delete the used-up extent from both extent trees and
		 * find next biggest extent
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "releasing extent: %u [%u %u]\n",
			agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif
		bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
		ASSERT(bno_ext_ptr != NULL);
		get_bno_extent(agno, bno_ext_ptr);
		release_extent_tree_node(bno_ext_ptr);

		ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
				ext_ptr->ex_blockcount);
		ASSERT(ext_ptr != NULL);
		release_extent_tree_node(ext_ptr);

		ext_ptr = findfirst_bcnt_extent(agno);
	}
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "blocks_allocated = %d\n",
		blocks_allocated);
#endif
}

static void
write_cursor(bt_status_t *curs)
{
	int i;

	for (i = 0; i < curs->num_levels; i++)  {
#if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
		fprintf(stderr, "writing bt block %u\n", curs->level[i].agbno);
#endif
		if (curs->level[i].prev_buf_p != NULL)  {
			ASSERT(curs->level[i].prev_agbno != NULLAGBLOCK);
#if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
			fprintf(stderr, "writing bt prev block %u\n",
						curs->level[i].prev_agbno);
#endif
			libxfs_writebuf(curs->level[i].prev_buf_p, 0);
		}
		libxfs_writebuf(curs->level[i].buf_p, 0);
	}
}

static void
finish_cursor(bt_status_t *curs)
{
	ASSERT(curs->num_free_blocks == 0);
	free(curs->btree_blocks);
}

/*
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL. This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened). If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then we fail, and the filesystem will fail with the same
 * failure at runtime. Hence leave a couple of records slack space in
 * each block to allow immediate modification of the tree without
 * requiring splits to be done.
 *
 * XXX(hch): any reason we don't just look at mp->m_alloc_mxr?
 */
#define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
	(libxfs_allocbt_maxrecs((mp), (mp)->m_sb.sb_blocksize, (level) == 0) - 2)
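/*
 * For scale (illustrative values assuming a 4096 byte block on a
 * filesystem without CRCs, where the short-form btree block header is
 * 16 bytes): a leaf holds (4096 - 16) / sizeof(xfs_alloc_rec_t) =
 * (4096 - 16) / 8 = 510 records and an interior node holds
 * (4096 - 16) / (8 byte key + 4 byte ptr) = 340 entries, so the macro
 * above evaluates to 508 and 338 respectively once the two-record
 * slack is deducted.
 */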

/*
 * this calculates a freespace cursor for an ag.
 * btree_curs is an in/out. returns the number of
 * blocks that will show up in the AGFL.
 */
static int
calculate_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t *extents, bt_status_t *btree_curs)
{
	xfs_extlen_t		blocks_needed;		/* a running count */
	xfs_extlen_t		blocks_allocated_pt;	/* per tree */
	xfs_extlen_t		blocks_allocated_total;	/* for both trees */
	xfs_agblock_t		num_extents;
	int			i;
	int			extents_used;
	int			extra_blocks;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	extent_tree_node_t	*ext_ptr;
	int			level;

	num_extents = *extents;
	extents_used = 0;

	ASSERT(num_extents != 0);

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;

	/*
	 * figure out how much space we need for the leaf level
	 * of the tree and set up the cursor for the leaf level
	 * (note that the same code is duplicated further down)
	 */
	lptr->num_blocks = howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0));
	lptr->num_recs_pb = num_extents / lptr->num_blocks;
	lptr->modulo = num_extents % lptr->num_blocks;
	lptr->num_recs_tot = num_extents;
	level = 1;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "%s 0 %d %d %d %d\n", __func__,
			lptr->num_blocks,
			lptr->num_recs_pb,
			lptr->modulo,
			lptr->num_recs_tot);
#endif
	/*
	 * if we need more levels, set them up. # of records
	 * per level is the # of blocks in the level below it
	 */
	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level - 1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks
					% lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr, "%s %d %d %d %d %d\n", __func__,
					level,
					lptr->num_blocks,
					lptr->num_recs_pb,
					lptr->modulo,
					lptr->num_recs_tot);
#endif
		}
	}

	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	/*
	 * ok, now we have a hypothetical cursor that
	 * will work for both the bno and bcnt trees.
	 * now figure out if using up blocks to set up the
	 * trees will perturb the shape of the freespace tree.
	 * if so, we've over-allocated. the freespace trees
	 * as they will be *after* accounting for the free space
	 * we've used up will need fewer blocks to represent
	 * than we've allocated. We can use the AGFL to hold
	 * xfs_agfl_size (sector/xfs_agfl_t) blocks but that's it.
	 * Thus we limit things to xfs_agfl_size/2 for each of the 2 btrees.
	 * if the number of extra blocks is more than that,
	 * we'll have to be called again.
	 */
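	/*
	 * Concrete limit (assuming 512 byte sectors on a non-CRC
	 * filesystem): the AGFL is one sector of xfs_agblock_t entries,
	 * i.e. 512 / 4 = 128 slots, so each of the two btrees may leave
	 * at most 64 excess blocks on the free list.
	 */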
	for (blocks_needed = 0, i = 0; i < level; i++)  {
		blocks_needed += btree_curs->level[i].num_blocks;
	}

	/*
	 * record the # of blocks we've allocated
	 */
	blocks_allocated_pt = blocks_needed;
	blocks_needed *= 2;
	blocks_allocated_total = blocks_needed;

	/*
	 * figure out how many free extents will be used up by
	 * our space allocation
	 */
	if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
		do_error(_("can't rebuild fs trees -- not enough free space "
			   "on ag %u\n"), agno);

	while (ext_ptr != NULL && blocks_needed > 0)  {
		if (ext_ptr->ex_blockcount <= blocks_needed)  {
			blocks_needed -= ext_ptr->ex_blockcount;
			extents_used++;
		} else  {
			blocks_needed = 0;
		}

		ext_ptr = findnext_bcnt_extent(agno, ext_ptr);

#ifdef XR_BLD_FREE_TRACE
		if (ext_ptr != NULL)  {
			fprintf(stderr, "got next extent [%u %u]\n",
				ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
		} else  {
			fprintf(stderr, "out of extents\n");
		}
#endif
	}
	if (blocks_needed > 0)
		do_error(_("ag %u - not enough free space to build freespace "
			   "btrees\n"), agno);

	ASSERT(num_extents >= extents_used);

	num_extents -= extents_used;

	/*
	 * see if the number of leaf blocks will change as a result
	 * of the number of extents changing
	 */
	if (howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0))
			!= btree_curs->level[0].num_blocks)  {
		/*
		 * yes -- recalculate the cursor. If the number of
		 * excess (overallocated) blocks is < xfs_agfl_size/2, we're ok.
		 * we can put those into the AGFL. we don't try
		 * and get things to converge exactly (reach a
		 * state with zero excess blocks) because there
		 * exist pathological cases which will never
		 * converge. first, check for the zero-case.
		 */
		if (num_extents == 0)  {
			/*
			 * ok, we've used up all the free blocks
			 * trying to lay out the leaf level. go
			 * to a one block (empty) btree and put the
			 * already allocated blocks into the AGFL
			 */
			if (btree_curs->level[0].num_blocks != 1)  {
				/*
				 * we really needed more blocks because
				 * the old tree had more than one level.
				 * this is bad.
				 */
				do_warn(_("not enough free blocks left to "
					  "describe all free blocks in AG "
					  "%u\n"), agno);
			}
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr,
				"ag %u -- no free extents, alloc'ed %d\n",
				agno, blocks_allocated_pt);
#endif
			lptr->num_blocks = 1;
			lptr->modulo = 0;
			lptr->num_recs_pb = 0;
			lptr->num_recs_tot = 0;

			btree_curs->num_levels = 1;

			/*
			 * don't reset the allocation stats, assume
			 * they're all extra blocks
			 * don't forget to return the total block count
			 * not the per-tree block count. these are the
			 * extras that will go into the AGFL. subtract
			 * two for the root blocks.
			 */
			btree_curs->num_tot_blocks = blocks_allocated_pt;
			btree_curs->num_free_blocks = blocks_allocated_pt;

			*extents = 0;

			return(blocks_allocated_total - 2);
		}

		lptr = &btree_curs->level[0];
		lptr->num_blocks = howmany(num_extents,
					XR_ALLOC_BLOCK_MAXRECS(mp, 0));
		lptr->num_recs_pb = num_extents / lptr->num_blocks;
		lptr->modulo = num_extents % lptr->num_blocks;
		lptr->num_recs_tot = num_extents;
		level = 1;

		/*
		 * if we need more levels, set them up
		 */
		if (lptr->num_blocks > 1)  {
			for (level = 1; btree_curs->level[level-1].num_blocks
					> 1 && level < XFS_BTREE_MAXLEVELS;
					level++)  {
				lptr = &btree_curs->level[level];
				p_lptr = &btree_curs->level[level-1];
				lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
				lptr->modulo = p_lptr->num_blocks
						% lptr->num_blocks;
				lptr->num_recs_pb = p_lptr->num_blocks
						/ lptr->num_blocks;
				lptr->num_recs_tot = p_lptr->num_blocks;
			}
		}
		ASSERT(lptr->num_blocks == 1);
		btree_curs->num_levels = level;

		/*
		 * now figure out the number of excess blocks
		 */
		for (blocks_needed = 0, i = 0; i < level; i++)  {
			blocks_needed += btree_curs->level[i].num_blocks;
		}
		blocks_needed *= 2;

		ASSERT(blocks_allocated_total >= blocks_needed);
		extra_blocks = blocks_allocated_total - blocks_needed;
	} else  {
		if (extents_used > 0) {
			/*
			 * reset the leaf level geometry to account
			 * for consumed extents. we can leave the
			 * rest of the cursor alone since the number
			 * of leaf blocks hasn't changed.
			 */
			lptr = &btree_curs->level[0];

			lptr->num_recs_pb = num_extents / lptr->num_blocks;
			lptr->modulo = num_extents % lptr->num_blocks;
			lptr->num_recs_tot = num_extents;
		}

		extra_blocks = 0;
	}

	btree_curs->num_tot_blocks = blocks_allocated_pt;
	btree_curs->num_free_blocks = blocks_allocated_pt;

	*extents = num_extents;

	return(extra_blocks);
}

/* Map btnum to buffer ops for the types that need it. */
static const struct xfs_buf_ops *
btnum_to_ops(
	xfs_btnum_t	btnum)
{
	switch (btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		return &xfs_allocbt_buf_ops;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		return &xfs_inobt_buf_ops;
	case XFS_BTNUM_RMAP:
		return &xfs_rmapbt_buf_ops;
	case XFS_BTNUM_REFC:
		return &xfs_refcountbt_buf_ops;
	default:
		ASSERT(0);
		return NULL;
	}
}

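/*
 * Record the new child block at @startblock/@blockcount in the next
 * level up of the tree being built: add a key/pointer pair to the
 * interior block at @level + 1, opening a fresh block at that level
 * (and recursing further up) whenever the current one is full.
 */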
static void
prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_agblock_t startblock,
		xfs_extlen_t blockcount, int level, xfs_btnum_t btnum)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_key_t		*bt_key;
	xfs_alloc_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(btnum);

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * only happens once when initializing the
		 * left-hand side of the tree.
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, btnum);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, " %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK) {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, level,
					0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, btnum);
	}
	/*
	 * add extent info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_alloc_mxr[1]);

	bt_key->ar_startblock = cpu_to_be32(startblock);
	bt_key->ar_blockcount = cpu_to_be32(blockcount);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

/*
 * rebuilds a freespace tree given a cursor and type
 * of tree to build (bno or bcnt). returns the number of free blocks
 * represented by the tree.
 */
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_btnum_t btnum)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_rec_t		*bt_rec;
	int			level;
	xfs_agblock_t		agbno;
	extent_tree_node_t	*ext_ptr;
	bt_stat_level_t		*lptr;
	xfs_extlen_t		freeblks;
	const struct xfs_buf_ops *ops = btnum_to_ops(btnum);

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
#endif
	level = btree_curs->num_levels;
	freeblks = 0;

	ASSERT(level > 0);

	/*
	 * initialize the first block on each btree level
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
	}
	/*
	 * run along leaf, setting up records. as we have to switch
	 * blocks, call the prop_freespace_cursor routine to set up the new
	 * pointers for the parent. that can recurse up to the root
	 * if required. set the sibling pointers for leaf level here.
	 */
	if (btnum == XFS_BTNUM_BNO)
		ext_ptr = findfirst_bno_extent(agno);
	else
		ext_ptr = findfirst_bcnt_extent(agno);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
		agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif

	lptr = &btree_curs->level[0];

	for (i = 0; i < btree_curs->level[0].num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "bft, bb_numrecs = %d\n",
				be16_to_cpu(bt_hdr->bb_numrecs));
#endif

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * initialize values in the path up to the root if
		 * this is a multi-level btree
		 */
		if (btree_curs->num_levels > 1)
			prop_freespace_cursor(mp, agno, btree_curs,
					ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount,
					0, btnum);

		bt_rec = (xfs_alloc_rec_t *)
			  ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++)  {
			ASSERT(ext_ptr != NULL);
			bt_rec[j].ar_startblock = cpu_to_be32(
							ext_ptr->ex_startblock);
			bt_rec[j].ar_blockcount = cpu_to_be32(
							ext_ptr->ex_blockcount);
			freeblks += ext_ptr->ex_blockcount;
			if (btnum == XFS_BTNUM_BNO)
				ext_ptr = findnext_bno_extent(ext_ptr);
			else
				ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
#if 0
#ifdef XR_BLD_FREE_TRACE
			if (ext_ptr == NULL)
				fprintf(stderr, "null extent pointer, j = %d\n",
					j);
			else
				fprintf(stderr,
				"bft, agno = %d, start = %u, count = %u\n",
					agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
#endif
#endif
		}

		if (ext_ptr != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_FREE_TRACE
				fprintf(stderr, " writing fst agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	return(freeblks);
}

/*
 * XXX(hch): any reason we don't just look at mp->m_inobt_mxr?
 */
#define XR_INOBT_BLOCK_MAXRECS(mp, level) \
			libxfs_inobt_maxrecs((mp), (mp)->m_sb.sb_blocksize, \
						(level) == 0)
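/*
 * Illustrative capacities (4096 byte blocks, non-CRC, 16 byte short
 * btree header): an inobt record is 16 bytes, so a leaf holds
 * (4096 - 16) / 16 = 255 records; a node entry is a 4 byte key plus a
 * 4 byte pointer, giving (4096 - 16) / 8 = 510 entries per block.
 */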

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because inode tree building happens before
 * freespace tree building.
 */
static void
init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
		uint64_t *num_inos, uint64_t *num_free_inos, int finobt)
{
	uint64_t		ninos;
	uint64_t		nfinos;
	int			rec_nfinos;
	int			rec_ninos;
	ino_tree_node_t		*ino_rec;
	int			num_recs;
	int			level;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	xfs_extlen_t		blocks_allocated;
	int			i;

	*num_inos = *num_free_inos = 0;
	ninos = nfinos = 0;

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_INOBT;

	/*
	 * build up statistics
	 */
	ino_rec = findfirst_inode_rec(agno);
	for (num_recs = 0; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec))  {
		rec_ninos = 0;
		rec_nfinos = 0;
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		/*
		 * finobt only considers records with free inodes
		 */
		if (finobt && !rec_nfinos)
			continue;

		nfinos += rec_nfinos;
		ninos += rec_ninos;
		num_recs++;
	}

	if (num_recs == 0) {
		/*
		 * easy corner-case -- no inode records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	blocks_allocated = lptr->num_blocks = howmany(num_recs,
					XR_INOBT_BLOCK_MAXRECS(mp, 0));

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
				XR_INOBT_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);

	*num_inos = ninos;
	*num_free_inos = nfinos;

	return;
}

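/*
 * Push the first inode number of a new leaf (or lower level) block up
 * into its parent: add a key/pointer pair at @level + 1, grabbing a new
 * interior block and recursing upward when the current one fills.
 */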
static void
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
	xfs_btnum_t btnum, xfs_agino_t startino, int level)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_key_t		*bt_key;
	xfs_inobt_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(btnum);

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_ino_cursor(mp, agno, btree_curs, btnum, startino, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_ino_cursor(mp, agno, btree_curs, btnum, startino, level);
	}
	/*
	 * add inode info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs),
				    mp->m_inobt_mxr[1]);

	bt_key->ir_startino = cpu_to_be32(startino);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

/*
 * XXX: yet more code that can be shared with mkfs, growfs.
 */
static void
build_agi(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
		bt_status_t *finobt_curs, struct agi_stat *agi_stat)
{
	xfs_buf_t	*agi_buf;
	xfs_agi_t	*agi;
	int		i;

	agi_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agi_buf->b_ops = &xfs_agi_buf_ops;
	agi = XFS_BUF_TO_AGI(agi_buf);
	memset(agi, 0, mp->m_sb.sb_sectsize);

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(agno);
	if (agno < mp->m_sb.sb_agcount - 1)
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
	agi->agi_count = cpu_to_be32(agi_stat->count);
	agi->agi_root = cpu_to_be32(btree_curs->root);
	agi->agi_level = cpu_to_be32(btree_curs->num_levels);
	agi->agi_freecount = cpu_to_be32(agi_stat->freecount);
	agi->agi_newino = cpu_to_be32(agi_stat->first_agino);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
		agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(finobt_curs->root);
		agi->agi_free_level = cpu_to_be32(finobt_curs->num_levels);
	}

	libxfs_writebuf(agi_buf, 0);
}

/*
 * rebuilds an inode tree given a cursor. We're lazy here and call
 * the routine that builds the agi
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_btnum_t btnum,
		struct agi_stat *agi_stat)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(btnum);
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);

	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records. as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent. that can recurse up to the root
	 * if required. set the sibling pointers for leaf level here.
	 */
	if (btnum == XFS_BTNUM_FINO)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
						 (lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs, btnum,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
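			/*
			 * Each holemask bit stands for
			 * XFS_INODES_PER_HOLEMASK_BIT = 64 / 16 = 4 inodes.
			 * e.g. (illustrative value) ir_sparse = 0xff marks
			 * inodes 0-7 of the chunk sparse, which sets holemask
			 * bits 0 and 1, i.e. holemask = 0x0003.
			 */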
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (btnum == XFS_BTNUM_FINO)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}

/* rebuild the rmap tree */

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because rmap tree building happens before
 * freespace tree building.
 */
static void
init_rmapbt_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	size_t			num_recs;
	int			level;
	struct bt_stat_level	*lptr;
	struct bt_stat_level	*p_lptr;
	xfs_extlen_t		blocks_allocated;
	int			maxrecs;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		memset(btree_curs, 0, sizeof(struct bt_status));
		return;
	}

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_AG;

	/*
	 * build up statistics
	 */
	num_recs = rmap_record_count(mp, agno);
	if (num_recs == 0) {
		/*
		 * easy corner-case -- no rmap records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	/*
	 * Leave enough slack in the rmapbt that we can insert the
	 * metadata AG entries without too many splits.
	 */
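	/*
	 * e.g. (illustrative, 4096 byte blocks with CRCs, 56 byte btree
	 * header): an rmapbt record is 24 bytes, so m_rmap_mxr[0] =
	 * (4096 - 56) / 24 = 168; reserving ten records of slack packs
	 * the leaves at 158 records apiece.
	 */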
	maxrecs = mp->m_rmap_mxr[0];
	if (num_recs > maxrecs)
		maxrecs -= 10;
	blocks_allocated = lptr->num_blocks = howmany(num_recs, maxrecs);

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
				mp->m_rmap_mxr[1]);
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);
}

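/*
 * rmapbt analogue of prop_freespace_cursor(): push the low key of a new
 * child block into the interior level at @level + 1, opening (and
 * recursing into) new interior blocks as they fill.
 */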
static void
prop_rmap_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	struct xfs_rmap_irec	*rm_rec,
	int			level)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_key	*bt_key;
	xfs_rmap_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	struct bt_stat_level	*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_RMAP);

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_rmap_cursor(mp, agno, btree_curs, rm_rec, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " rmap prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_rmap_cursor(mp, agno, btree_curs, rm_rec, level);
	}
	/*
	 * add rmap info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_RMAP_KEY_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_RMAP_PTR_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_rmap_mxr[1]);

	bt_key->rm_startblock = cpu_to_be32(rm_rec->rm_startblock);
	bt_key->rm_owner = cpu_to_be64(rm_rec->rm_owner);
	bt_key->rm_offset = cpu_to_be64(rm_rec->rm_offset);

	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

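/*
 * The rmapbt is an overlapping-interval tree, so interior entries carry
 * high keys as well as low keys. After a leaf is filled, walk the
 * interior levels, store the largest key seen so far in the high-key
 * slot of the current rightmost entry, and carry each block's maximum
 * up toward the root.
 */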
static void
prop_rmap_highkey(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	struct xfs_rmap_irec	*rm_highkey)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_key	*bt_key;
	struct bt_stat_level	*lptr;
	struct xfs_rmap_irec	key = {0};
	struct xfs_rmap_irec	high_key;
	int			level;
	int			i;
	int			numrecs;

	high_key = *rm_highkey;
	for (level = 1; level < btree_curs->num_levels; level++) {
		lptr = &btree_curs->level[level];
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		numrecs = be16_to_cpu(bt_hdr->bb_numrecs);
		bt_key = XFS_RMAP_HIGH_KEY_ADDR(bt_hdr, numrecs);

		bt_key->rm_startblock = cpu_to_be32(high_key.rm_startblock);
		bt_key->rm_owner = cpu_to_be64(high_key.rm_owner);
		bt_key->rm_offset = cpu_to_be64(
				libxfs_rmap_irec_offset_pack(&high_key));

		for (i = 1; i <= numrecs; i++) {
			bt_key = XFS_RMAP_HIGH_KEY_ADDR(bt_hdr, i);
			key.rm_startblock = be32_to_cpu(bt_key->rm_startblock);
			key.rm_owner = be64_to_cpu(bt_key->rm_owner);
			key.rm_offset = be64_to_cpu(bt_key->rm_offset);
			if (rmap_diffkeys(&key, &high_key) > 0)
				high_key = key;
		}
	}
}

/*
 * rebuilds a rmap btree given a cursor.
 */
static void
build_rmap_tree(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_irec	*rm_rec;
	struct xfs_slab_cursor	*rmap_cur;
	struct xfs_rmap_rec	*bt_rec;
	struct xfs_rmap_irec	highest_key = {0};
	struct xfs_rmap_irec	hi_key = {0};
	struct bt_stat_level	*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_RMAP);
	int			numrecs;
	int			level = btree_curs->num_levels;
	int			error;

	highest_key.rm_flags = 0;
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records. as we have to switch
	 * blocks, call the prop_rmap_cursor routine to set up the new
	 * pointers for the parent. that can recurse up to the root
	 * if required. set the sibling pointers for leaf level here.
	 */
	error = rmap_init_cursor(agno, &rmap_cur);
	if (error)
		do_error(
_("Insufficient memory to construct reverse-map cursor."));
	rm_rec = pop_slab_cursor(rmap_cur);
	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
		ASSERT(rm_rec != NULL || numrecs == 0);

		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(numrecs);

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0) {
			ASSERT(rm_rec != NULL);
			prop_rmap_cursor(mp, agno, btree_curs, rm_rec, 0);
		}

		bt_rec = (struct xfs_rmap_rec *)
			  ((char *)bt_hdr + XFS_RMAP_BLOCK_LEN);
		highest_key.rm_startblock = 0;
		highest_key.rm_owner = 0;
		highest_key.rm_offset = 0;
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++)  {
			ASSERT(rm_rec != NULL);
			bt_rec[j].rm_startblock =
					cpu_to_be32(rm_rec->rm_startblock);
			bt_rec[j].rm_blockcount =
					cpu_to_be32(rm_rec->rm_blockcount);
			bt_rec[j].rm_owner = cpu_to_be64(rm_rec->rm_owner);
			bt_rec[j].rm_offset = cpu_to_be64(
					libxfs_rmap_irec_offset_pack(rm_rec));
			rmap_high_key_from_rec(rm_rec, &hi_key);
			if (rmap_diffkeys(&hi_key, &highest_key) > 0)
				highest_key = hi_key;

			rm_rec = pop_slab_cursor(rmap_cur);
		}

		/* Now go set the parent key */
		prop_rmap_highkey(mp, agno, btree_curs, &highest_key);

		if (rm_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_RL_TRACE
				fprintf(stderr, "writing rmapbt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}
	free_slab_cursor(&rmap_cur);
}

/* rebuild the refcount tree */

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because reflink tree building happens before
 * freespace tree building.
 */
static void
init_refc_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	size_t			num_recs;
	int			level;
	struct bt_stat_level	*lptr;
	struct bt_stat_level	*p_lptr;
	xfs_extlen_t		blocks_allocated;

	if (!xfs_sb_version_hasreflink(&mp->m_sb)) {
		memset(btree_curs, 0, sizeof(struct bt_status));
		return;
	}

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_REFC;

	/*
	 * build up statistics
	 */
	num_recs = refcount_record_count(mp, agno);
	if (num_recs == 0) {
		/*
		 * easy corner-case -- no refcount records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	blocks_allocated = lptr->num_blocks = howmany(num_recs,
					mp->m_refc_mxr[0]);

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					mp->m_refc_mxr[1]);
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);
}

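/*
 * refcountbt analogue of prop_ino_cursor(): propagate the starting
 * block of a new child into the parent level, allocating fresh interior
 * blocks and recursing upward as levels fill.
 */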
static void
prop_refc_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	xfs_agblock_t		startbno,
	int			level)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_refcount_key	*bt_key;
	xfs_refcount_ptr_t	*bt_ptr;
	xfs_agblock_t		agbno;
	struct bt_stat_level	*lptr;
	const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_REFC);

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_refc_cursor(mp, agno, btree_curs, startbno, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " refc prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_refc_cursor(mp, agno, btree_curs, startbno, level);
	}
	/*
	 * add refcount info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_REFCOUNT_KEY_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_REFCOUNT_PTR_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_refc_mxr[1]);

	bt_key->rc_startblock = cpu_to_be32(startbno);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

1849 /*
1850 * rebuilds a refcount btree given a cursor.
1851 */
1852 static void
1853 build_refcount_tree(
1854 struct xfs_mount *mp,
1855 xfs_agnumber_t agno,
1856 struct bt_status *btree_curs)
1857 {
1858 xfs_agnumber_t i;
1859 xfs_agblock_t j;
1860 xfs_agblock_t agbno;
1861 struct xfs_btree_block *bt_hdr;
1862 struct xfs_refcount_irec *refc_rec;
1863 struct xfs_slab_cursor *refc_cur;
1864 struct xfs_refcount_rec *bt_rec;
1865 struct bt_stat_level *lptr;
1866 const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_REFC);
1867 int numrecs;
1868 int level = btree_curs->num_levels;
1869 int error;
1870
1871 for (i = 0; i < level; i++) {
1872 lptr = &btree_curs->level[i];
1873
1874 agbno = get_next_blockaddr(agno, i, btree_curs);
1875 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1876 XFS_AGB_TO_DADDR(mp, agno, agbno),
1877 XFS_FSB_TO_BB(mp, 1));
1878
1879 if (i == btree_curs->num_levels - 1)
1880 btree_curs->root = agbno;
1881
1882 lptr->agbno = agbno;
1883 lptr->prev_agbno = NULLAGBLOCK;
1884 lptr->prev_buf_p = NULL;
1885 /*
1886 * initialize block header
1887 */
1888
1889 lptr->buf_p->b_ops = ops;
1890 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1891 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
1892 libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
1893 i, 0, agno, 0);
1894 }
1895
1896 /*
1897 * run along leaf, setting up records. as we have to switch
1898 * blocks, call the prop_refc_cursor routine to set up the new
1899 * pointers for the parent. that can recurse up to the root
1900 * if required. set the sibling pointers for leaf level here.
1901 */
1902 error = init_refcount_cursor(agno, &refc_cur);
1903 if (error)
1904 do_error(
1905 _("Insufficient memory to construct refcount cursor."));
1906 refc_rec = pop_slab_cursor(refc_cur);
1907 lptr = &btree_curs->level[0];
1908
1909 for (i = 0; i < lptr->num_blocks; i++) {
1910 numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
1911 ASSERT(refc_rec != NULL || numrecs == 0);
1912
1913 /*
1914 * block initialization, lay in block header
1915 */
1916 lptr->buf_p->b_ops = ops;
1917 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1918 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
1919 libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
1920 0, 0, agno, 0);
1921
1922 bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
1923 bt_hdr->bb_numrecs = cpu_to_be16(numrecs);
1924
1925 if (lptr->modulo > 0)
1926 lptr->modulo--;
1927
1928 if (lptr->num_recs_pb > 0)
1929 prop_refc_cursor(mp, agno, btree_curs,
1930 refc_rec->rc_startblock, 0);
1931
1932 bt_rec = (struct xfs_refcount_rec *)
1933 ((char *)bt_hdr + XFS_REFCOUNT_BLOCK_LEN);
1934 for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
1935 ASSERT(refc_rec != NULL);
1936 bt_rec[j].rc_startblock =
1937 cpu_to_be32(refc_rec->rc_startblock);
1938 bt_rec[j].rc_blockcount =
1939 cpu_to_be32(refc_rec->rc_blockcount);
1940 bt_rec[j].rc_refcount = cpu_to_be32(refc_rec->rc_refcount);
1941
1942 refc_rec = pop_slab_cursor(refc_cur);
1943 }
1944
1945 if (refc_rec != NULL) {
1946 /*
1947 * get next leaf level block
1948 */
1949 if (lptr->prev_buf_p != NULL) {
1950 #ifdef XR_BLD_RL_TRACE
1951 fprintf(stderr, "writing refcntbt agbno %u\n",
1952 lptr->prev_agbno);
1953 #endif
1954 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
1955 libxfs_writebuf(lptr->prev_buf_p, 0);
1956 }
1957 lptr->prev_buf_p = lptr->buf_p;
1958 lptr->prev_agbno = lptr->agbno;
1959 lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
1960 bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);
1961
1962 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1963 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
1964 XFS_FSB_TO_BB(mp, 1));
1965 }
1966 }
1967 free_slab_cursor(&refc_cur);
1968 }
1969
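/*
 * The slab cursor protocol driving build_refcount_tree() is the same
 * one the other builders in this file use.  A minimal consumer,
 * assuming a hypothetical use_record() callback, would look like:
 *
 *	struct xfs_slab_cursor		*cur;
 *	struct xfs_refcount_irec	*rec;
 *
 *	if (init_refcount_cursor(agno, &cur))
 *		return;
 *	while ((rec = pop_slab_cursor(cur)) != NULL)
 *		use_record(rec);
 *	free_slab_cursor(&cur);
 */
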
1970 /*
1971 * build both the agf and the agfl for an agno given both
1972 * btree cursors.
1973 *
1974 * XXX: yet more common code that can be shared with mkfs/growfs.
1975 */
1976 static void
1977 build_agf_agfl(
1978 struct xfs_mount *mp,
1979 xfs_agnumber_t agno,
1980 struct bt_status *bno_bt,
1981 struct bt_status *bcnt_bt,
1982 xfs_extlen_t freeblks, /* # free blocks in tree */
1983 int lostblocks, /* # blocks that will be lost */
1984 struct bt_status *rmap_bt,
1985 struct bt_status *refcnt_bt,
1986 struct xfs_slab *lost_fsb)
1987 {
1988 struct extent_tree_node *ext_ptr;
1989 struct xfs_buf *agf_buf, *agfl_buf;
1990 int i;
1991 struct xfs_agfl *agfl;
1992 struct xfs_agf *agf;
1993 xfs_fsblock_t fsb;
1994 __be32 *freelist;
1995 int error;
1996
1997 agf_buf = libxfs_getbuf(mp->m_dev,
1998 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1999 mp->m_sb.sb_sectsize/BBSIZE);
2000 agf_buf->b_ops = &xfs_agf_buf_ops;
2001 agf = XFS_BUF_TO_AGF(agf_buf);
2002 memset(agf, 0, mp->m_sb.sb_sectsize);
2003
2004 #ifdef XR_BLD_FREE_TRACE
2005 fprintf(stderr, "agf = %p, agf_buf->b_addr = %p\n",
2006 agf, agf_buf->b_addr);
2007 #endif
2008
2009 /*
2010 * set up fixed part of agf
2011 */
2012 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
2013 agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
2014 agf->agf_seqno = cpu_to_be32(agno);
2015
2016 if (agno < mp->m_sb.sb_agcount - 1)
2017 agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
2018 else
2019 agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
2020 (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
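/*
 * The length rule above, restated as a hypothetical helper for
 * clarity: every AG except the last is sb_agblocks long, and the
 * last AG gets whatever is left of sb_dblocks.
 *
 *	static xfs_agblock_t
 *	ag_length(struct xfs_mount *mp, xfs_agnumber_t agno)
 *	{
 *		if (agno < mp->m_sb.sb_agcount - 1)
 *			return mp->m_sb.sb_agblocks;
 *		return mp->m_sb.sb_dblocks -
 *			(xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno;
 *	}
 */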
2021
2022 agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
2023 agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
2024 agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
2025 agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
2026 agf->agf_roots[XFS_BTNUM_RMAP] = cpu_to_be32(rmap_bt->root);
2027 agf->agf_levels[XFS_BTNUM_RMAP] = cpu_to_be32(rmap_bt->num_levels);
2028 agf->agf_freeblks = cpu_to_be32(freeblks);
2029 agf->agf_rmap_blocks = cpu_to_be32(rmap_bt->num_tot_blocks -
2030 rmap_bt->num_free_blocks);
2031 agf->agf_refcount_root = cpu_to_be32(refcnt_bt->root);
2032 agf->agf_refcount_level = cpu_to_be32(refcnt_bt->num_levels);
2033 agf->agf_refcount_blocks = cpu_to_be32(refcnt_bt->num_tot_blocks -
2034 refcnt_bt->num_free_blocks);
2035
2036 /*
2037 * Count and record the number of btree blocks consumed if required.
2038 */
2039 if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
2040 unsigned int blks;
2041 /*
2042 * Don't count the root blocks as they are already
2043 * accounted for.
2044 */
2045 blks = (bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
2046 (bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
2047 2;
2048 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2049 blks += rmap_bt->num_tot_blocks - rmap_bt->num_free_blocks - 1;
2050 agf->agf_btreeblks = cpu_to_be32(blks);
2051 #ifdef XR_BLD_FREE_TRACE
2052 fprintf(stderr, "agf->agf_btreeblks = %u\n",
2053 be32_to_cpu(agf->agf_btreeblks));
2054 #endif
2055 }
2056
2057 #ifdef XR_BLD_FREE_TRACE
2058 fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
2059 be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
2060 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
2061 XFS_BTNUM_BNO,
2062 XFS_BTNUM_CNT);
2063 #endif
2064
2065 if (xfs_sb_version_hascrc(&mp->m_sb))
2066 platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
2067
2068 /* initialise the AGFL, then fill it if there are blocks left over. */
2069 agfl_buf = libxfs_getbuf(mp->m_dev,
2070 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
2071 mp->m_sb.sb_sectsize/BBSIZE);
2072 agfl_buf->b_ops = &xfs_agfl_buf_ops;
2073 agfl = XFS_BUF_TO_AGFL(agfl_buf);
2074
2075 /* setting to 0xff results in initialisation to NULLAGBLOCK */
2076 memset(agfl, 0xff, mp->m_sb.sb_sectsize);
2077 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2078 agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
2079 agfl->agfl_seqno = cpu_to_be32(agno);
2080 platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
2081 for (i = 0; i < libxfs_agfl_size(mp); i++)
2082 agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
2083 }
2084 freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);
2085
2086 /*
2087 * do we have left-over blocks in the btree cursors that should
2088 * be used to fill the AGFL?
2089 */
2090 if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
2091 /*
2092 * yes, now grab as many blocks as we can
2093 */
2094 i = 0;
2095 while (bno_bt->num_free_blocks > 0 &&
2096 i < libxfs_agfl_size(mp)) {
2097 freelist[i] = cpu_to_be32(
2098 get_next_blockaddr(agno, 0, bno_bt));
2099 i++;
2100 }
2101
2102 while (bcnt_bt->num_free_blocks > 0 &&
2103 i < libxfs_agfl_size(mp)) {
2104 freelist[i] = cpu_to_be32(
2105 get_next_blockaddr(agno, 0, bcnt_bt));
2106 i++;
2107 }
2108 /*
2109 * stash the remaining blocks; inject_lost_blocks() frees them later
2110 */
2111 while (bno_bt->num_free_blocks > 0) {
2112 fsb = XFS_AGB_TO_FSB(mp, agno,
2113 get_next_blockaddr(agno, 0, bno_bt));
2114 error = slab_add(lost_fsb, &fsb);
2115 if (error)
2116 do_error(
2117 _("Insufficient memory saving lost blocks.\n"));
2118 }
2119 while (bcnt_bt->num_free_blocks > 0) {
2120 fsb = XFS_AGB_TO_FSB(mp, agno,
2121 get_next_blockaddr(agno, 0, bcnt_bt));
2122 error = slab_add(lost_fsb, &fsb);
2123 if (error)
2124 do_error(
2125 _("Insufficient memory saving lost blocks.\n"));
2126 }
2127
2128 agf->agf_flfirst = 0;
2129 agf->agf_fllast = cpu_to_be32(i - 1);
2130 agf->agf_flcount = cpu_to_be32(i);
2131 rmap_store_agflcount(mp, agno, i);
2132
2133 #ifdef XR_BLD_FREE_TRACE
2134 fprintf(stderr, "writing agfl for ag %u\n", agno);
2135 #endif
2136
2137 } else {
2138 agf->agf_flfirst = 0;
2139 agf->agf_fllast = cpu_to_be32(libxfs_agfl_size(mp) - 1);
2140 agf->agf_flcount = 0;
2141 }
2142
2143 libxfs_writebuf(agfl_buf, 0);
2144
2145 ext_ptr = findbiggest_bcnt_extent(agno);
2146 agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
2147 ext_ptr->ex_blockcount : 0);
2148
2149 ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
2150 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
2151 ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
2152 be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]));
2153 ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
2154 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
2155
2156 libxfs_writebuf(agf_buf, 0);
2157
2158 /*
2159 * now fix up the free list appropriately
2160 */
2161 fix_freelist(mp, agno, true);
2162
2163 #ifdef XR_BLD_FREE_TRACE
2164 fprintf(stderr, "wrote agf for ag %u\n", agno);
2165 #endif
2166 }
2167
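/*
 * The AGFL written above is logically a ring over agfl_bno[]:
 * agf_flfirst and agf_fllast are slot indices that wrap modulo
 * libxfs_agfl_size(mp).  Because the list is rebuilt from slot zero
 * here, walking it back out is simply (sketch):
 *
 *	for (i = 0; i < be32_to_cpu(agf->agf_flcount); i++)
 *		agbno = be32_to_cpu(freelist[i]);
 */
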
2168 /*
2169 * update the superblock counters, sync the sb version numbers and
2170 * feature bits to the filesystem, and sync up the on-disk superblock
2171 * to match the incore superblock.
2172 */
2173 static void
2174 sync_sb(xfs_mount_t *mp)
2175 {
2176 xfs_buf_t *bp;
2177
2178 bp = libxfs_getsb(mp, 0);
2179 if (!bp)
2180 do_error(_("couldn't get superblock\n"));
2181
2182 mp->m_sb.sb_icount = sb_icount;
2183 mp->m_sb.sb_ifree = sb_ifree;
2184 mp->m_sb.sb_fdblocks = sb_fdblocks;
2185 mp->m_sb.sb_frextents = sb_frextents;
2186
2187 update_sb_version(mp);
2188
2189 libxfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
2190 libxfs_writebuf(bp, 0);
2191 }
2192
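/*
 * Note the ordering in sync_sb(): the incore counters must be final
 * before libxfs_sb_to_disk() runs, since that call serializes all of
 * mp->m_sb into the buffer in on-disk (big-endian) format.
 */
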
2193 /*
2194 * make sure the root and realtime inodes show up allocated
2195 * even if they've been freed. they get reinitialized in phase6.
2196 */
2197 static void
2198 keep_fsinos(xfs_mount_t *mp)
2199 {
2200 ino_tree_node_t *irec;
2201 int i;
2202
2203 irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
2204 XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
2205
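/*
 * the root inode, realtime bitmap inode, and realtime summary inode
 * are allocated back to back by mkfs, so marking the first three
 * slots of the chunk holding the root inode covers all three.
 */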
2206 for (i = 0; i < 3; i++)
2207 set_inode_used(irec, i);
2208 }
2209
2210 static void
2211 phase5_func(
2212 xfs_mount_t *mp,
2213 xfs_agnumber_t agno,
2214 struct xfs_slab *lost_fsb)
2215 {
2216 uint64_t num_inos;
2217 uint64_t num_free_inos;
2218 uint64_t finobt_num_inos;
2219 uint64_t finobt_num_free_inos;
2220 bt_status_t bno_btree_curs;
2221 bt_status_t bcnt_btree_curs;
2222 bt_status_t ino_btree_curs;
2223 bt_status_t fino_btree_curs;
2224 bt_status_t rmap_btree_curs;
2225 bt_status_t refcnt_btree_curs;
2226 int extra_blocks = 0;
2227 uint num_freeblocks;
2228 xfs_extlen_t freeblks1;
2229 #ifdef DEBUG
2230 xfs_extlen_t freeblks2;
2231 #endif
2232 xfs_agblock_t num_extents;
2233 struct agi_stat agi_stat = {0,};
2234 int error;
2235
2236 if (verbose)
2237 do_log(_(" - agno = %d\n"), agno);
2238
2239 {
2240 /*
2241 * build up incore bno and bcnt extent btrees
2242 */
2243 num_extents = mk_incore_fstree(mp, agno);
2244
2245 #ifdef XR_BLD_FREE_TRACE
2246 fprintf(stderr, "# of bno extents is %d\n",
2247 count_bno_extents(agno));
2248 #endif
2249
2250 if (num_extents == 0) {
2251 /*
2252 * XXX - what we probably should do here is pick an
2253 * inode for a regular file in the allocation group
2254 * that has space allocated and shoot it by traversing
2255 * the bmap list and putting all its extents on the
2256 * incore freespace trees, clearing the inode,
2257 * and clearing the in-use bit in the incore inode
2258 * tree. Then try mk_incore_fstree() again.
2259 */
2260 do_error(_("unable to rebuild AG %u. "
2261 "Not enough free space in on-disk AG.\n"),
2262 agno);
2263 }
2264
2265 /*
2266 * ok, now set up the btree cursors for the
2267 * on-disk btrees (includes pre-allocating all
2268 * required blocks for the trees themselves)
2269 */
2270 init_ino_cursor(mp, agno, &ino_btree_curs, &num_inos,
2271 &num_free_inos, 0);
2272
2273 if (xfs_sb_version_hasfinobt(&mp->m_sb))
2274 init_ino_cursor(mp, agno, &fino_btree_curs,
2275 &finobt_num_inos, &finobt_num_free_inos,
2276 1);
2277
2278 sb_icount_ag[agno] += num_inos;
2279 sb_ifree_ag[agno] += num_free_inos;
2280
2281 /*
2282 * Set up the btree cursors for the on-disk rmap btrees,
2283 * which includes pre-allocating all required blocks.
2284 */
2285 init_rmapbt_cursor(mp, agno, &rmap_btree_curs);
2286
2287 /*
2288 * Set up the btree cursors for the on-disk refcount btrees,
2289 * which includes pre-allocating all required blocks.
2290 */
2291 init_refc_cursor(mp, agno, &refcnt_btree_curs);
2292
2293 num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
2294 /*
2295 * lose two blocks per AG -- the space tree roots
2296 * are counted as allocated since the space trees
2297 * always have roots
2298 */
2299 sb_fdblocks_ag[agno] += num_freeblocks - 2;
2300
2301 if (num_extents == 0) {
2302 /*
2303 * XXX - the recovery strategy sketched in the identical
2304 * check above would apply here as well.
2305 */
2311 do_error(
2312 _("unable to rebuild AG %u. No free space.\n"), agno);
2313 }
2314
2315 #ifdef XR_BLD_FREE_TRACE
2316 fprintf(stderr, "# of bno extents is %d\n", num_extents);
2317 #endif
2318
2319 /*
2320 * track blocks that we might really lose
2321 */
2322 extra_blocks = calculate_freespace_cursor(mp, agno,
2323 &num_extents, &bno_btree_curs);
2324
2325 /*
2326 * freespace btrees live in the "free space" but
2327 * the filesystem treats AGFL blocks as allocated
2328 * since they aren't described by the freespace trees
2329 */
2330
2331 /*
2332 * see if we can fit all the extra blocks into the AGFL
2333 */
2334 extra_blocks = (extra_blocks - libxfs_agfl_size(mp) > 0)
2335 ? extra_blocks - libxfs_agfl_size(mp)
2336 : 0;
2337
2338 if (extra_blocks > 0)
2339 sb_fdblocks_ag[agno] -= extra_blocks;
2340
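/*
 * the bcnt tree has the same geometry (levels, records per block,
 * modulo) as the bno tree since both index the same extent set, so
 * the computed cursor can simply be copied; setup_cursor() below
 * still hands each tree its own block list.
 */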
2341 bcnt_btree_curs = bno_btree_curs;
2342
2343 bno_btree_curs.owner = XFS_RMAP_OWN_AG;
2344 bcnt_btree_curs.owner = XFS_RMAP_OWN_AG;
2345 setup_cursor(mp, agno, &bno_btree_curs);
2346 setup_cursor(mp, agno, &bcnt_btree_curs);
2347
2348 #ifdef XR_BLD_FREE_TRACE
2349 fprintf(stderr, "# of bno extents is %d\n",
2350 count_bno_extents(agno));
2351 fprintf(stderr, "# of bcnt extents is %d\n",
2352 count_bcnt_extents(agno));
2353 #endif
2354
2355 /*
2356 * now rebuild the freespace trees
2357 */
2358 freeblks1 = build_freespace_tree(mp, agno,
2359 &bno_btree_curs, XFS_BTNUM_BNO);
2360 #ifdef XR_BLD_FREE_TRACE
2361 fprintf(stderr, "# of free blocks == %d\n", freeblks1);
2362 #endif
2363 write_cursor(&bno_btree_curs);
2364
2365 #ifdef DEBUG
2366 freeblks2 = build_freespace_tree(mp, agno,
2367 &bcnt_btree_curs, XFS_BTNUM_CNT);
2368 #else
2369 (void) build_freespace_tree(mp, agno,
2370 &bcnt_btree_curs, XFS_BTNUM_CNT);
2371 #endif
2372 write_cursor(&bcnt_btree_curs);
2373
2374 ASSERT(freeblks1 == freeblks2);
2375
2376 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
2377 build_rmap_tree(mp, agno, &rmap_btree_curs);
2378 write_cursor(&rmap_btree_curs);
2379 sb_fdblocks_ag[agno] += (rmap_btree_curs.num_tot_blocks -
2380 rmap_btree_curs.num_free_blocks) - 1;
2381 }
2382
2383 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2384 build_refcount_tree(mp, agno, &refcnt_btree_curs);
2385 write_cursor(&refcnt_btree_curs);
2386 }
2387
2388 /*
2389 * set up agf and agfl
2390 */
2391 build_agf_agfl(mp, agno, &bno_btree_curs,
2392 &bcnt_btree_curs, freeblks1, extra_blocks,
2393 &rmap_btree_curs, &refcnt_btree_curs, lost_fsb);
2394 /*
2395 * build inode allocation tree.
2396 */
2397 build_ino_tree(mp, agno, &ino_btree_curs, XFS_BTNUM_INO,
2398 &agi_stat);
2399 write_cursor(&ino_btree_curs);
2400
2401 /*
2402 * build free inode tree
2403 */
2404 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2405 build_ino_tree(mp, agno, &fino_btree_curs,
2406 XFS_BTNUM_FINO, NULL);
2407 write_cursor(&fino_btree_curs);
2408 }
2409
2410 /* build the agi */
2411 build_agi(mp, agno, &ino_btree_curs, &fino_btree_curs,
2412 &agi_stat);
2413
2414 /*
2415 * tear down cursors
2416 */
2417 finish_cursor(&bno_btree_curs);
2418 finish_cursor(&ino_btree_curs);
2419 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2420 finish_cursor(&rmap_btree_curs);
2421 if (xfs_sb_version_hasreflink(&mp->m_sb))
2422 finish_cursor(&refcnt_btree_curs);
2423 if (xfs_sb_version_hasfinobt(&mp->m_sb))
2424 finish_cursor(&fino_btree_curs);
2425 finish_cursor(&bcnt_btree_curs);
2426
2427 /*
2428 * Put the per-AG btree rmap data into the rmapbt
2429 */
2430 error = rmap_store_ag_btree_rec(mp, agno);
2431 if (error)
2432 do_error(
2433 _("unable to add AG %u reverse-mapping data to btree.\n"), agno);
2434
2435 /*
2436 * release the incore per-AG bno/bcnt trees so
2437 * the extent nodes can be recycled
2438 */
2439 release_agbno_extent_tree(agno);
2440 release_agbcnt_extent_tree(agno);
2441 }
2442 PROG_RPT_INC(prog_rpt_done[agno], 1);
2443 }
2444
2445 /* Inject lost blocks back into the filesystem. */
2446 static int
2447 inject_lost_blocks(
2448 struct xfs_mount *mp,
2449 struct xfs_slab *lost_fsbs)
2450 {
2451 struct xfs_trans *tp = NULL;
2452 struct xfs_slab_cursor *cur = NULL;
2453 xfs_fsblock_t *fsb;
2454 int error;
2455
2456 error = init_slab_cursor(lost_fsbs, NULL, &cur);
2457 if (error)
2458 return error;
2459
2460 while ((fsb = pop_slab_cursor(cur)) != NULL) {
2461 error = -libxfs_trans_alloc_rollable(mp, 16, &tp);
2462 if (error)
2463 goto out_cancel;
2464
2465 error = -libxfs_free_extent(tp, *fsb, 1, &XFS_RMAP_OINFO_AG,
2466 XFS_AG_RESV_NONE);
2467 if (error)
2468 goto out_cancel;
2469
2470 error = -libxfs_trans_commit(tp);
2471 if (error)
2472 goto out_cancel;
2473 tp = NULL;
2474 }
2475
2476 out_cancel:
2477 if (tp)
2478 libxfs_trans_cancel(tp);
2479 free_slab_cursor(&cur);
2480 return error;
2481 }
2482
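/*
 * Each lost extent above is freed in its own rollable transaction, so
 * a failure part way through leaves at most one extent unaccounted
 * for.  The blocks were carved out for per-AG btrees, hence the
 * XFS_RMAP_OINFO_AG owner; the 16 passed to
 * libxfs_trans_alloc_rollable() is the block reservation for the
 * resulting free-space btree updates.
 */
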
2483 void
2484 phase5(xfs_mount_t *mp)
2485 {
2486 struct xfs_slab *lost_fsb;
2487 xfs_agnumber_t agno;
2488 int error;
2489
2490 do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
2491 set_progress_msg(PROG_FMT_REBUILD_AG, (uint64_t)glob_agcount);
2492
2493 #ifdef XR_BLD_FREE_TRACE
2494 fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
2495 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
2496 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
2497 fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
2498 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
2499 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
2500 fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
2501 XR_INOBT_BLOCK_MAXRECS(mp, 0));
2502 fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
2503 XR_INOBT_BLOCK_MAXRECS(mp, 1));
2504 fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
2505 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
2506 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
2507 fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
2508 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
2509 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
2510 #endif
2511 /*
2512 * make sure the root and realtime inodes show up allocated
2513 */
2514 keep_fsinos(mp);
2515
2516 /* allocate per ag counters */
2517 sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2518 if (sb_icount_ag == NULL)
2519 do_error(_("cannot alloc sb_icount_ag buffers\n"));
2520
2521 sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2522 if (sb_ifree_ag == NULL)
2523 do_error(_("cannot alloc sb_ifree_ag buffers\n"));
2524
2525 sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2526 if (sb_fdblocks_ag == NULL)
2527 do_error(_("cannot alloc sb_fdblocks_ag buffers\n"));
2528
2529 error = init_slab(&lost_fsb, sizeof(xfs_fsblock_t));
2530 if (error)
2531 do_error(_("cannot alloc lost block slab\n"));
2532
2533 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++)
2534 phase5_func(mp, agno, lost_fsb);
2535
2536 print_final_rpt();
2537
2538 /* aggregate per ag counters */
2539 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2540 sb_icount += sb_icount_ag[agno];
2541 sb_ifree += sb_ifree_ag[agno];
2542 sb_fdblocks += sb_fdblocks_ag[agno];
2543 }
2544 free(sb_icount_ag);
2545 free(sb_ifree_ag);
2546 free(sb_fdblocks_ag);
2547
2548 if (mp->m_sb.sb_rblocks) {
2549 do_log(
2550 _(" - generate realtime summary info and bitmap...\n"));
2551 rtinit(mp);
2552 generate_rtinfo(mp, btmcompute, sumcompute);
2553 }
2554
2555 do_log(_(" - reset superblock...\n"));
2556
2557 /*
2558 * sync superblock counter and set version bits correctly
2559 */
2560 sync_sb(mp);
2561
2562 error = inject_lost_blocks(mp, lost_fsb);
2563 if (error)
2564 do_error(_("Unable to reinsert lost blocks into filesystem.\n"));
2565 free_slab(&lost_fsb);
2566
2567 bad_ino_btree = 0;
2569 }