]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - repair/phase5.c
xfs: better xfs_trans_alloc interface
[thirdparty/xfsprogs-dev.git] / repair / phase5.c
1 /*
2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include "libxfs.h"
20 #include "avl.h"
21 #include "globals.h"
22 #include "agheader.h"
23 #include "incore.h"
24 #include "protos.h"
25 #include "err_protos.h"
26 #include "dinode.h"
27 #include "rt.h"
28 #include "versions.h"
29 #include "threads.h"
30 #include "progress.h"
31
/*
 * we maintain the current slice (path from root to leaf)
 * of the btree incore.  when we need a new block, we ask
 * the block allocator for the address of a block on that
 * level, map the block in, and set up the appropriate
 * pointers (child, sibling, etc.) and keys that should
 * point to the new block.
 */
typedef struct bt_stat_level  {
	/*
	 * set in setup_cursor routine and maintained in the tree-building
	 * routines
	 */
	xfs_buf_t		*buf_p;		/* 2 buffer pointers to ... */
	xfs_buf_t		*prev_buf_p;	/* ... current and previous btree blocks */
	xfs_agblock_t		agbno;		/* current block being filled */
	xfs_agblock_t		prev_agbno;	/* previous block */
	/*
	 * set in calculate/init cursor routines for each btree level
	 */
	int			num_recs_tot;	/* # tree recs in level */
	int			num_blocks;	/* # tree blocks in level */
	int			num_recs_pb;	/* num_recs_tot / num_blocks */
	int			modulo;		/* num_recs_tot % num_blocks */
} bt_stat_level_t;
57
typedef struct bt_status  {
	int			init;		/* cursor set up once? */
	int			num_levels;	/* # of levels in btree */
	xfs_extlen_t		num_tot_blocks;	/* # blocks alloc'ed for tree */
	xfs_extlen_t		num_free_blocks;/* # blocks currently unused */

	xfs_agblock_t		root;		/* root block */
	/*
	 * list of blocks to be used to set up this tree
	 * and pointer to the first unused block on the list.
	 * the array is num_tot_blocks long and is consumed
	 * front-to-back via get_next_blockaddr().
	 */
	xfs_agblock_t		*btree_blocks;		/* block list */
	xfs_agblock_t		*free_btree_blocks;	/* first unused block */
	/*
	 * per-level status info
	 */
	bt_stat_level_t		level[XFS_BTREE_MAXLEVELS];
} bt_status_t;
76
/*
 * extra metadata for the agi -- inode counts/pointers gathered while
 * rebuilding the inode btrees, later written into the on-disk AGI.
 */
struct agi_stat {
	xfs_agino_t		first_agino;	/* first inode in the ag (agi_newino) */
	xfs_agino_t		count;		/* allocated inodes in the ag */
	xfs_agino_t		freecount;	/* free inodes in the ag */
};

/* per-ag tallies accumulated in phase 5 and rolled into the superblock */
static __uint64_t	*sb_icount_ag;		/* allocated inodes per ag */
static __uint64_t	*sb_ifree_ag;		/* free inodes per ag */
static __uint64_t	*sb_fdblocks_ag;	/* free data blocks per ag */
89
90 static int
91 mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
92 {
93 int in_extent;
94 int num_extents;
95 xfs_agblock_t extent_start;
96 xfs_extlen_t extent_len;
97 xfs_agblock_t agbno;
98 xfs_agblock_t ag_end;
99 uint free_blocks;
100 xfs_extlen_t blen;
101 int bstate;
102
103 /*
104 * scan the bitmap for the ag looking for continuous
105 * extents of free blocks. At this point, we know
106 * that blocks in the bitmap are either set to an
107 * "in use" state or set to unknown (0) since the
108 * bmaps were zero'ed in phase 4 and only blocks
109 * being used by inodes, inode bmaps, ag headers,
110 * and the files themselves were put into the bitmap.
111 *
112 */
113 ASSERT(agno < mp->m_sb.sb_agcount);
114
115 extent_start = extent_len = 0;
116 in_extent = 0;
117 num_extents = free_blocks = 0;
118
119 if (agno < mp->m_sb.sb_agcount - 1)
120 ag_end = mp->m_sb.sb_agblocks;
121 else
122 ag_end = mp->m_sb.sb_dblocks -
123 (xfs_rfsblock_t)mp->m_sb.sb_agblocks *
124 (mp->m_sb.sb_agcount - 1);
125
126 /*
127 * ok, now find the number of extents, keep track of the
128 * largest extent.
129 */
130 for (agbno = 0; agbno < ag_end; agbno += blen) {
131 bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
132 if (bstate < XR_E_INUSE) {
133 free_blocks += blen;
134 if (in_extent == 0) {
135 /*
136 * found the start of a free extent
137 */
138 in_extent = 1;
139 num_extents++;
140 extent_start = agbno;
141 extent_len = blen;
142 } else {
143 extent_len += blen;
144 }
145 } else {
146 if (in_extent) {
147 /*
148 * free extent ends here, add extent to the
149 * 2 incore extent (avl-to-be-B+) trees
150 */
151 in_extent = 0;
152 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
153 fprintf(stderr, "adding extent %u [%u %u]\n",
154 agno, extent_start, extent_len);
155 #endif
156 add_bno_extent(agno, extent_start, extent_len);
157 add_bcnt_extent(agno, extent_start, extent_len);
158 }
159 }
160 }
161 if (in_extent) {
162 /*
163 * free extent ends here
164 */
165 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
166 fprintf(stderr, "adding extent %u [%u %u]\n",
167 agno, extent_start, extent_len);
168 #endif
169 add_bno_extent(agno, extent_start, extent_len);
170 add_bcnt_extent(agno, extent_start, extent_len);
171 }
172
173 return(num_extents);
174 }
175
176 static xfs_agblock_t
177 get_next_blockaddr(xfs_agnumber_t agno, int level, bt_status_t *curs)
178 {
179 ASSERT(curs->free_btree_blocks < curs->btree_blocks +
180 curs->num_tot_blocks);
181 ASSERT(curs->num_free_blocks > 0);
182
183 curs->num_free_blocks--;
184 return(*curs->free_btree_blocks++);
185 }
186
187 /*
188 * set up the dynamically allocated block allocation data in the btree
189 * cursor that depends on the info in the static portion of the cursor.
190 * allocates space from the incore bno/bcnt extent trees and sets up
191 * the first path up the left side of the tree. Also sets up the
192 * cursor pointer to the btree root. called by init_freespace_cursor()
193 * and init_ino_cursor()
194 */
195 static void
196 setup_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *curs)
197 {
198 int j;
199 unsigned int u;
200 xfs_extlen_t big_extent_len;
201 xfs_agblock_t big_extent_start;
202 extent_tree_node_t *ext_ptr;
203 extent_tree_node_t *bno_ext_ptr;
204 xfs_extlen_t blocks_allocated;
205 xfs_agblock_t *agb_ptr;
206
207 /*
208 * get the number of blocks we need to allocate, then
209 * set up block number array, set the free block pointer
210 * to the first block in the array, and null the array
211 */
212 big_extent_len = curs->num_tot_blocks;
213 blocks_allocated = 0;
214
215 ASSERT(big_extent_len > 0);
216
217 if ((curs->btree_blocks = malloc(sizeof(xfs_agblock_t)
218 * big_extent_len)) == NULL)
219 do_error(_("could not set up btree block array\n"));
220
221 agb_ptr = curs->free_btree_blocks = curs->btree_blocks;
222
223 for (j = 0; j < curs->num_free_blocks; j++, agb_ptr++)
224 *agb_ptr = NULLAGBLOCK;
225
226 /*
227 * grab the smallest extent and use it up, then get the
228 * next smallest. This mimics the init_*_cursor code.
229 */
230 if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
231 do_error(_("error - not enough free space in filesystem\n"));
232
233 agb_ptr = curs->btree_blocks;
234
235 /*
236 * set up the free block array
237 */
238 while (blocks_allocated < big_extent_len) {
239 /*
240 * use up the extent we've got
241 */
242 for (u = 0; u < ext_ptr->ex_blockcount &&
243 blocks_allocated < big_extent_len; u++) {
244 ASSERT(agb_ptr < curs->btree_blocks
245 + curs->num_tot_blocks);
246 *agb_ptr++ = ext_ptr->ex_startblock + u;
247 blocks_allocated++;
248 }
249
250 /*
251 * if we only used part of this last extent, then we
252 * need only to reset the extent in the extent
253 * trees and we're done
254 */
255 if (u < ext_ptr->ex_blockcount) {
256 big_extent_start = ext_ptr->ex_startblock + u;
257 big_extent_len = ext_ptr->ex_blockcount - u;
258
259 ASSERT(big_extent_len > 0);
260
261 bno_ext_ptr = find_bno_extent(agno,
262 ext_ptr->ex_startblock);
263 ASSERT(bno_ext_ptr != NULL);
264 get_bno_extent(agno, bno_ext_ptr);
265 release_extent_tree_node(bno_ext_ptr);
266
267 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
268 ext_ptr->ex_blockcount);
269 release_extent_tree_node(ext_ptr);
270 #ifdef XR_BLD_FREE_TRACE
271 fprintf(stderr, "releasing extent: %u [%u %u]\n",
272 agno, ext_ptr->ex_startblock,
273 ext_ptr->ex_blockcount);
274 fprintf(stderr, "blocks_allocated = %d\n",
275 blocks_allocated);
276 #endif
277
278 add_bno_extent(agno, big_extent_start, big_extent_len);
279 add_bcnt_extent(agno, big_extent_start, big_extent_len);
280
281 return;
282 }
283 /*
284 * delete the used-up extent from both extent trees and
285 * find next biggest extent
286 */
287 #ifdef XR_BLD_FREE_TRACE
288 fprintf(stderr, "releasing extent: %u [%u %u]\n",
289 agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
290 #endif
291 bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
292 ASSERT(bno_ext_ptr != NULL);
293 get_bno_extent(agno, bno_ext_ptr);
294 release_extent_tree_node(bno_ext_ptr);
295
296 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
297 ext_ptr->ex_blockcount);
298 ASSERT(ext_ptr != NULL);
299 release_extent_tree_node(ext_ptr);
300
301 ext_ptr = findfirst_bcnt_extent(agno);
302 }
303 #ifdef XR_BLD_FREE_TRACE
304 fprintf(stderr, "blocks_allocated = %d\n",
305 blocks_allocated);
306 #endif
307 }
308
309 static void
310 write_cursor(bt_status_t *curs)
311 {
312 int i;
313
314 for (i = 0; i < curs->num_levels; i++) {
315 #if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
316 fprintf(stderr, "writing bt block %u\n", curs->level[i].agbno);
317 #endif
318 if (curs->level[i].prev_buf_p != NULL) {
319 ASSERT(curs->level[i].prev_agbno != NULLAGBLOCK);
320 #if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
321 fprintf(stderr, "writing bt prev block %u\n",
322 curs->level[i].prev_agbno);
323 #endif
324 libxfs_writebuf(curs->level[i].prev_buf_p, 0);
325 }
326 libxfs_writebuf(curs->level[i].buf_p, 0);
327 }
328 }
329
330 static void
331 finish_cursor(bt_status_t *curs)
332 {
333 ASSERT(curs->num_free_blocks == 0);
334 free(curs->btree_blocks);
335 }
336
/*
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL. This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened). If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then we fail, and the filesystem will fail with the same
 * failure at runtime. Hence leave a couple of records slack space in
 * each block to allow immediate modification of the tree without
 * requiring splits to be done.
 *
 * XXX(hch): any reason we don't just look at mp->m_alloc_mxr?
 */
/* max records per freespace-btree block, minus 2 records of slack */
#define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
	(xfs_allocbt_maxrecs((mp), (mp)->m_sb.sb_blocksize, (level) == 0) - 2)
354
/*
 * this calculates a freespace cursor for an ag.
 * btree_curs is an in/out.  returns the number of
 * blocks that will show up in the AGFL.
 *
 * *extents is in/out as well: on entry the number of free extents in
 * the ag, on exit the number remaining after the btree blocks have
 * been carved out of the free space.
 */
static int
calculate_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t *extents, bt_status_t *btree_curs)
{
	xfs_extlen_t		blocks_needed;		/* a running count */
	xfs_extlen_t		blocks_allocated_pt;	/* per tree */
	xfs_extlen_t		blocks_allocated_total;	/* for both trees */
	xfs_agblock_t		num_extents;
	int			i;
	int			extents_used;
	int			extra_blocks;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	extent_tree_node_t	*ext_ptr;
	int			level;

	num_extents = *extents;
	extents_used = 0;

	ASSERT(num_extents != 0);

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;

	/*
	 * figure out how much space we need for the leaf level
	 * of the tree and set up the cursor for the leaf level
	 * (note that the same code is duplicated further down)
	 */
	lptr->num_blocks = howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0));
	lptr->num_recs_pb = num_extents / lptr->num_blocks;
	lptr->modulo = num_extents % lptr->num_blocks;
	lptr->num_recs_tot = num_extents;
	level = 1;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "%s 0 %d %d %d %d\n", __func__,
			lptr->num_blocks,
			lptr->num_recs_pb,
			lptr->modulo,
			lptr->num_recs_tot);
#endif
	/*
	 * if we need more levels, set them up.  # of records
	 * per level is the # of blocks in the level below it
	 */
	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level - 1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks
					% lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr, "%s %d %d %d %d %d\n", __func__,
					level,
					lptr->num_blocks,
					lptr->num_recs_pb,
					lptr->modulo,
					lptr->num_recs_tot);
#endif
		}
	}

	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	/*
	 * ok, now we have a hypothetical cursor that
	 * will work for both the bno and bcnt trees.
	 * now figure out if using up blocks to set up the
	 * trees will perturb the shape of the freespace tree.
	 * if so, we've over-allocated.  the freespace trees
	 * as they will be *after* accounting for the free space
	 * we've used up will need fewer blocks to represent
	 * than we've allocated.  We can use the AGFL to hold
	 * XFS_AGFL_SIZE (sector/xfs_agfl_t) blocks but that's it.
	 * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
	 * if the number of extra blocks is more than that,
	 * we'll have to be called again.
	 */
	for (blocks_needed = 0, i = 0; i < level; i++)  {
		blocks_needed += btree_curs->level[i].num_blocks;
	}

	/*
	 * record the # of blocks we've allocated
	 * (x2 because the bno and bcnt trees share the same shape)
	 */
	blocks_allocated_pt = blocks_needed;
	blocks_needed *= 2;
	blocks_allocated_total = blocks_needed;

	/*
	 * figure out how many free extents will be used up by
	 * our space allocation
	 */
	if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
		do_error(_("can't rebuild fs trees -- not enough free space "
			   "on ag %u\n"), agno);

	while (ext_ptr != NULL && blocks_needed > 0)  {
		if (ext_ptr->ex_blockcount <= blocks_needed)  {
			blocks_needed -= ext_ptr->ex_blockcount;
			extents_used++;
		} else  {
			/* extent only partially consumed -- count stays */
			blocks_needed = 0;
		}

		ext_ptr = findnext_bcnt_extent(agno, ext_ptr);

#ifdef XR_BLD_FREE_TRACE
		if (ext_ptr != NULL)  {
			fprintf(stderr, "got next extent [%u %u]\n",
				ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
		} else  {
			fprintf(stderr, "out of extents\n");
		}
#endif
	}
	if (blocks_needed > 0)
		do_error(_("ag %u - not enough free space to build freespace "
			   "btrees\n"), agno);

	ASSERT(num_extents >= extents_used);

	num_extents -= extents_used;

	/*
	 * see if the number of leaf blocks will change as a result
	 * of the number of extents changing
	 */
	if (howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0))
			!= btree_curs->level[0].num_blocks)  {
		/*
		 * yes -- recalculate the cursor.  If the number of
		 * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
		 * we can put those into the AGFL.  we don't try
		 * and get things to converge exactly (reach a
		 * state with zero excess blocks) because there
		 * exist pathological cases which will never
		 * converge.  first, check for the zero-case.
		 */
		if (num_extents == 0)  {
			/*
			 * ok, we've used up all the free blocks
			 * trying to lay out the leaf level. go
			 * to a one block (empty) btree and put the
			 * already allocated blocks into the AGFL
			 */
			if (btree_curs->level[0].num_blocks != 1)  {
				/*
				 * we really needed more blocks because
				 * the old tree had more than one level.
				 * this is bad.
				 */
				 do_warn(_("not enough free blocks left to "
					   "describe all free blocks in AG "
					   "%u\n"), agno);
			}
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr,
				"ag %u -- no free extents, alloc'ed %d\n",
				agno, blocks_allocated_pt);
#endif
			lptr->num_blocks = 1;
			lptr->modulo = 0;
			lptr->num_recs_pb = 0;
			lptr->num_recs_tot = 0;

			btree_curs->num_levels = 1;

			/*
			 * don't reset the allocation stats, assume
			 * they're all extra blocks
			 * don't forget to return the total block count
			 * not the per-tree block count.  these are the
			 * extras that will go into the AGFL.  subtract
			 * two for the root blocks.
			 */
			btree_curs->num_tot_blocks = blocks_allocated_pt;
			btree_curs->num_free_blocks = blocks_allocated_pt;

			*extents = 0;

			return(blocks_allocated_total - 2);
		}

		/*
		 * leaf count changed -- redo the geometry calculation
		 * (duplicate of the code at the top of this routine)
		 */
		lptr = &btree_curs->level[0];
		lptr->num_blocks = howmany(num_extents,
					XR_ALLOC_BLOCK_MAXRECS(mp, 0));
		lptr->num_recs_pb = num_extents / lptr->num_blocks;
		lptr->modulo = num_extents % lptr->num_blocks;
		lptr->num_recs_tot = num_extents;
		level = 1;

		/*
		 * if we need more levels, set them up
		 */
		if (lptr->num_blocks > 1)  {
			for (level = 1; btree_curs->level[level-1].num_blocks
					> 1 && level < XFS_BTREE_MAXLEVELS;
					level++)  {
				lptr = &btree_curs->level[level];
				p_lptr = &btree_curs->level[level-1];
				lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
				lptr->modulo = p_lptr->num_blocks
						% lptr->num_blocks;
				lptr->num_recs_pb = p_lptr->num_blocks
						/ lptr->num_blocks;
				lptr->num_recs_tot = p_lptr->num_blocks;
			}
		}
		ASSERT(lptr->num_blocks == 1);
		btree_curs->num_levels = level;

		/*
		 * now figure out the number of excess blocks
		 */
		for (blocks_needed = 0, i = 0; i < level; i++)  {
			blocks_needed += btree_curs->level[i].num_blocks;
		}
		blocks_needed *= 2;

		ASSERT(blocks_allocated_total >= blocks_needed);
		extra_blocks = blocks_allocated_total - blocks_needed;
	} else  {
		if (extents_used > 0) {
			/*
			 * reset the leaf level geometry to account
			 * for consumed extents.  we can leave the
			 * rest of the cursor alone since the number
			 * of leaf blocks hasn't changed.
			 */
			lptr = &btree_curs->level[0];

			lptr->num_recs_pb = num_extents / lptr->num_blocks;
			lptr->modulo = num_extents % lptr->num_blocks;
			lptr->num_recs_tot = num_extents;
		}

		extra_blocks = 0;
	}

	btree_curs->num_tot_blocks = blocks_allocated_pt;
	btree_curs->num_free_blocks = blocks_allocated_pt;

	*extents = num_extents;

	return(extra_blocks);
}
617
618 static void
619 prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
620 bt_status_t *btree_curs, xfs_agblock_t startblock,
621 xfs_extlen_t blockcount, int level, __uint32_t magic)
622 {
623 struct xfs_btree_block *bt_hdr;
624 xfs_alloc_key_t *bt_key;
625 xfs_alloc_ptr_t *bt_ptr;
626 xfs_agblock_t agbno;
627 bt_stat_level_t *lptr;
628 __uint32_t crc_magic;
629
630 if (magic == XFS_ABTB_MAGIC)
631 crc_magic = XFS_ABTB_CRC_MAGIC;
632 else
633 crc_magic = XFS_ABTC_CRC_MAGIC;
634
635 level++;
636
637 if (level >= btree_curs->num_levels)
638 return;
639
640 lptr = &btree_curs->level[level];
641 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
642
643 if (be16_to_cpu(bt_hdr->bb_numrecs) == 0) {
644 /*
645 * only happens once when initializing the
646 * left-hand side of the tree.
647 */
648 prop_freespace_cursor(mp, agno, btree_curs, startblock,
649 blockcount, level, magic);
650 }
651
652 if (be16_to_cpu(bt_hdr->bb_numrecs) ==
653 lptr->num_recs_pb + (lptr->modulo > 0)) {
654 /*
655 * write out current prev block, grab us a new block,
656 * and set the rightsib pointer of current block
657 */
658 #ifdef XR_BLD_FREE_TRACE
659 fprintf(stderr, " %d ", lptr->prev_agbno);
660 #endif
661 if (lptr->prev_agbno != NULLAGBLOCK) {
662 ASSERT(lptr->prev_buf_p != NULL);
663 libxfs_writebuf(lptr->prev_buf_p, 0);
664 }
665 lptr->prev_agbno = lptr->agbno;;
666 lptr->prev_buf_p = lptr->buf_p;
667 agbno = get_next_blockaddr(agno, level, btree_curs);
668
669 bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);
670
671 lptr->buf_p = libxfs_getbuf(mp->m_dev,
672 XFS_AGB_TO_DADDR(mp, agno, agbno),
673 XFS_FSB_TO_BB(mp, 1));
674 lptr->agbno = agbno;
675
676 if (lptr->modulo)
677 lptr->modulo--;
678
679 /*
680 * initialize block header
681 */
682 lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
683 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
684 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
685 if (xfs_sb_version_hascrc(&mp->m_sb))
686 xfs_btree_init_block(mp, lptr->buf_p, crc_magic, level,
687 0, agno, XFS_BTREE_CRC_BLOCKS);
688 else
689 xfs_btree_init_block(mp, lptr->buf_p, magic, level,
690 0, agno, 0);
691
692 bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
693
694 /*
695 * propagate extent record for first extent in new block up
696 */
697 prop_freespace_cursor(mp, agno, btree_curs, startblock,
698 blockcount, level, magic);
699 }
700 /*
701 * add extent info to current block
702 */
703 be16_add_cpu(&bt_hdr->bb_numrecs, 1);
704
705 bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
706 be16_to_cpu(bt_hdr->bb_numrecs));
707 bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
708 be16_to_cpu(bt_hdr->bb_numrecs),
709 mp->m_alloc_mxr[1]);
710
711 bt_key->ar_startblock = cpu_to_be32(startblock);
712 bt_key->ar_blockcount = cpu_to_be32(blockcount);
713 *bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
714 }
715
/*
 * rebuilds a freespace tree given a cursor and magic number of type
 * of tree to build (bno or bcnt).  returns the number of free blocks
 * represented by the tree.
 */
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_rec_t		*bt_rec;
	int			level;
	xfs_agblock_t		agbno;
	extent_tree_node_t	*ext_ptr;
	bt_stat_level_t		*lptr;
	xfs_extlen_t		freeblks;	/* running total of free blocks recorded */
	__uint32_t		crc_magic;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
#endif
	level = btree_curs->num_levels;
	freeblks = 0;

	ASSERT(level > 0);
	/* pick the CRC-enabled magic matching the requested tree type */
	if (magic == XFS_ABTB_MAGIC)
		crc_magic = XFS_ABTB_CRC_MAGIC;
	else
		crc_magic = XFS_ABTC_CRC_MAGIC;

	/*
	 * initialize the first block on each btree level
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		/* the single block at the top level is the tree root */
		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, i,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, i,
						0, agno, 0);
	}
	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_freespace_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (magic == XFS_ABTB_MAGIC)
		ext_ptr = findfirst_bno_extent(agno);
	else
		ext_ptr = findfirst_bcnt_extent(agno);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
		agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif

	lptr = &btree_curs->level[0];

	for (i = 0; i < btree_curs->level[0].num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, 0,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, 0,
						0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		/* first 'modulo' leaf blocks carry one extra record */
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "bft, bb_numrecs = %d\n",
				be16_to_cpu(bt_hdr->bb_numrecs));
#endif

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * initialize values in the path up to the root if
		 * this is a multi-level btree
		 */
		if (btree_curs->num_levels > 1)
			prop_freespace_cursor(mp, agno, btree_curs,
					ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount,
					0, magic);

		bt_rec = (xfs_alloc_rec_t *)
			  ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ext_ptr != NULL);
			bt_rec[j].ar_startblock = cpu_to_be32(
							ext_ptr->ex_startblock);
			bt_rec[j].ar_blockcount = cpu_to_be32(
							ext_ptr->ex_blockcount);
			freeblks += ext_ptr->ex_blockcount;
			/* walk extents in the same order the tree is keyed */
			if (magic == XFS_ABTB_MAGIC)
				ext_ptr = findnext_bno_extent(ext_ptr);
			else
				ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
#if 0
#ifdef XR_BLD_FREE_TRACE
			if (ext_ptr == NULL)
				fprintf(stderr, "null extent pointer, j = %d\n",
					j);
			else
				fprintf(stderr,
				"bft, agno = %d, start = %u, count = %u\n",
					agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
#endif
#endif
		}

		if (ext_ptr != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_FREE_TRACE
				fprintf(stderr, " writing fst agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	return(freeblks);
}
883
/*
 * max records per inode-btree block (no slack, unlike the alloc trees)
 *
 * XXX(hch): any reason we don't just look at mp->m_inobt_mxr?
 */
#define XR_INOBT_BLOCK_MAXRECS(mp, level) \
			xfs_inobt_maxrecs((mp), (mp)->m_sb.sb_blocksize, \
						(level) == 0)
890
/*
 * calculate the geometry of an inode btree (inobt, or finobt when the
 * finobt flag is set) for one ag and allocate its blocks via
 * setup_cursor().  returns the ag's allocated/free inode totals
 * through num_inos / num_free_inos (both left at 0 when there are no
 * records).
 *
 * we don't have to worry here about how chewing up free extents
 * may perturb things because inode tree building happens before
 * freespace tree building.
 */
static void
init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
		__uint64_t *num_inos, __uint64_t *num_free_inos, int finobt)
{
	__uint64_t		ninos;
	__uint64_t		nfinos;
	int			rec_nfinos;	/* free inodes in this chunk */
	int			rec_ninos;	/* counted inodes in this chunk */
	ino_tree_node_t		*ino_rec;
	int			num_recs;
	int			level;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	xfs_extlen_t		blocks_allocated;
	int			i;

	*num_inos = *num_free_inos = 0;
	ninos = nfinos = 0;

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;

	/*
	 * build up statistics
	 */
	ino_rec = findfirst_inode_rec(agno);
	for (num_recs = 0; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec))  {
		rec_ninos = 0;
		rec_nfinos = 0;
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		/*
		 * finobt only considers records with free inodes
		 */
		if (finobt && !rec_nfinos)
			continue;

		nfinos += rec_nfinos;
		ninos += rec_ninos;
		num_recs++;
	}

	if (num_recs == 0) {
		/*
		 * easy corner-case -- no inode records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	/* leaf-level geometry: spread num_recs evenly across the blocks */
	blocks_allocated = lptr->num_blocks = howmany(num_recs,
					XR_INOBT_BLOCK_MAXRECS(mp, 0));

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	/*
	 * if we need more levels, set them up.  # of records per
	 * interior level is the # of blocks in the level below it
	 */
	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_INOBT_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);

	*num_inos = ninos;
	*num_free_inos = nfinos;

	return;
}
1003
1004 static void
1005 prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
1006 xfs_agino_t startino, int level)
1007 {
1008 struct xfs_btree_block *bt_hdr;
1009 xfs_inobt_key_t *bt_key;
1010 xfs_inobt_ptr_t *bt_ptr;
1011 xfs_agblock_t agbno;
1012 bt_stat_level_t *lptr;
1013
1014 level++;
1015
1016 if (level >= btree_curs->num_levels)
1017 return;
1018
1019 lptr = &btree_curs->level[level];
1020 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1021
1022 if (be16_to_cpu(bt_hdr->bb_numrecs) == 0) {
1023 /*
1024 * this only happens once to initialize the
1025 * first path up the left side of the tree
1026 * where the agbno's are already set up
1027 */
1028 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1029 }
1030
1031 if (be16_to_cpu(bt_hdr->bb_numrecs) ==
1032 lptr->num_recs_pb + (lptr->modulo > 0)) {
1033 /*
1034 * write out current prev block, grab us a new block,
1035 * and set the rightsib pointer of current block
1036 */
1037 #ifdef XR_BLD_INO_TRACE
1038 fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
1039 #endif
1040 if (lptr->prev_agbno != NULLAGBLOCK) {
1041 ASSERT(lptr->prev_buf_p != NULL);
1042 libxfs_writebuf(lptr->prev_buf_p, 0);
1043 }
1044 lptr->prev_agbno = lptr->agbno;;
1045 lptr->prev_buf_p = lptr->buf_p;
1046 agbno = get_next_blockaddr(agno, level, btree_curs);
1047
1048 bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);
1049
1050 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1051 XFS_AGB_TO_DADDR(mp, agno, agbno),
1052 XFS_FSB_TO_BB(mp, 1));
1053 lptr->agbno = agbno;
1054
1055 if (lptr->modulo)
1056 lptr->modulo--;
1057
1058 /*
1059 * initialize block header
1060 */
1061 lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
1062 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1063 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
1064 if (xfs_sb_version_hascrc(&mp->m_sb))
1065 xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_CRC_MAGIC,
1066 level, 0, agno,
1067 XFS_BTREE_CRC_BLOCKS);
1068 else
1069 xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_MAGIC,
1070 level, 0, agno, 0);
1071
1072 bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
1073
1074 /*
1075 * propagate extent record for first extent in new block up
1076 */
1077 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1078 }
1079 /*
1080 * add inode info to current block
1081 */
1082 be16_add_cpu(&bt_hdr->bb_numrecs, 1);
1083
1084 bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
1085 be16_to_cpu(bt_hdr->bb_numrecs));
1086 bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
1087 be16_to_cpu(bt_hdr->bb_numrecs),
1088 mp->m_inobt_mxr[1]);
1089
1090 bt_key->ir_startino = cpu_to_be32(startino);
1091 *bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
1092 }
1093
1094 /*
1095 * XXX: yet more code that can be shared with mkfs, growfs.
1096 */
1097 static void
1098 build_agi(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
1099 bt_status_t *finobt_curs, struct agi_stat *agi_stat)
1100 {
1101 xfs_buf_t *agi_buf;
1102 xfs_agi_t *agi;
1103 int i;
1104
1105 agi_buf = libxfs_getbuf(mp->m_dev,
1106 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
1107 mp->m_sb.sb_sectsize/BBSIZE);
1108 agi_buf->b_ops = &xfs_agi_buf_ops;
1109 agi = XFS_BUF_TO_AGI(agi_buf);
1110 memset(agi, 0, mp->m_sb.sb_sectsize);
1111
1112 agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
1113 agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
1114 agi->agi_seqno = cpu_to_be32(agno);
1115 if (agno < mp->m_sb.sb_agcount - 1)
1116 agi->agi_length = cpu_to_be32(mp->m_sb.sb_agblocks);
1117 else
1118 agi->agi_length = cpu_to_be32(mp->m_sb.sb_dblocks -
1119 (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
1120 agi->agi_count = cpu_to_be32(agi_stat->count);
1121 agi->agi_root = cpu_to_be32(btree_curs->root);
1122 agi->agi_level = cpu_to_be32(btree_curs->num_levels);
1123 agi->agi_freecount = cpu_to_be32(agi_stat->freecount);
1124 agi->agi_newino = cpu_to_be32(agi_stat->first_agino);
1125 agi->agi_dirino = cpu_to_be32(NULLAGINO);
1126
1127 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
1128 agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);
1129
1130 if (xfs_sb_version_hascrc(&mp->m_sb))
1131 platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
1132
1133 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
1134 agi->agi_free_root = cpu_to_be32(finobt_curs->root);
1135 agi->agi_free_level = cpu_to_be32(finobt_curs->num_levels);
1136 }
1137
1138 libxfs_writebuf(agi_buf, 0);
1139 }
1140
/*
 * Rebuild an on-disk inode allocation btree (inobt or finobt) for
 * @agno from the incore inode records, consuming the blocks that were
 * pre-allocated into @btree_curs.  The AGI itself is written later by
 * build_agi().
 *
 * magic    - btree block magic number (inobt or finobt, CRC or not)
 * agi_stat - if non-NULL, receives the first inode number plus the
 *            total/free inode counts for the AGI; the finobt pass
 *            passes NULL since the regular pass already filled it
 * finobt   - nonzero to walk only records containing free inodes
 *            (i.e. build the free inode btree)
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic,
		struct agi_stat *agi_stat, int finobt)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	xfs_agino_t		count = 0;	/* total inodes seen */
	xfs_agino_t		freecount = 0;	/* free inodes seen */
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	/*
	 * set up the leftmost block at every level of the tree from
	 * the blocks reserved in the cursor
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));

		/* the highest level's single block is the tree root */
		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (finobt)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		/* distribute the leftover records one per leading block */
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * propagate the key of this leaf's first record into
		 * the interior levels
		 */
		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			/*
			 * count allocated and free inodes in the chunk,
			 * skipping sparse (not physically present) ones
			 */
			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.  Each holemask bit stands
			 * for XFS_INODES_PER_HOLEMASK_BIT inodes, all of which
			 * must agree (all sparse or all present).
			 */
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (finobt)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	/* report the totals back for build_agi() (inobt pass only) */
	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}
1328
1329 /*
1330 * build both the agf and the agfl for an agno given both
1331 * btree cursors.
1332 *
1333 * XXX: yet more common code that can be shared with mkfs/growfs.
1334 */
1335 static void
1336 build_agf_agfl(xfs_mount_t *mp,
1337 xfs_agnumber_t agno,
1338 bt_status_t *bno_bt,
1339 bt_status_t *bcnt_bt,
1340 xfs_extlen_t freeblks, /* # free blocks in tree */
1341 int lostblocks) /* # blocks that will be lost */
1342 {
1343 extent_tree_node_t *ext_ptr;
1344 xfs_buf_t *agf_buf, *agfl_buf;
1345 int i;
1346 int j;
1347 xfs_agfl_t *agfl;
1348 xfs_agf_t *agf;
1349 __be32 *freelist;
1350
1351 agf_buf = libxfs_getbuf(mp->m_dev,
1352 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1353 mp->m_sb.sb_sectsize/BBSIZE);
1354 agf_buf->b_ops = &xfs_agf_buf_ops;
1355 agf = XFS_BUF_TO_AGF(agf_buf);
1356 memset(agf, 0, mp->m_sb.sb_sectsize);
1357
1358 #ifdef XR_BLD_FREE_TRACE
1359 fprintf(stderr, "agf = 0x%p, agf_buf->b_addr = 0x%p\n",
1360 agf, agf_buf->b_addr);
1361 #endif
1362
1363 /*
1364 * set up fixed part of agf
1365 */
1366 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
1367 agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
1368 agf->agf_seqno = cpu_to_be32(agno);
1369
1370 if (agno < mp->m_sb.sb_agcount - 1)
1371 agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
1372 else
1373 agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
1374 (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
1375
1376 agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
1377 agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
1378 agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
1379 agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
1380 agf->agf_freeblks = cpu_to_be32(freeblks);
1381
1382 /*
1383 * Count and record the number of btree blocks consumed if required.
1384 */
1385 if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
1386 /*
1387 * Don't count the root blocks as they are already
1388 * accounted for.
1389 */
1390 agf->agf_btreeblks = cpu_to_be32(
1391 (bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
1392 (bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
1393 2);
1394 #ifdef XR_BLD_FREE_TRACE
1395 fprintf(stderr, "agf->agf_btreeblks = %u\n",
1396 be32_to_cpu(agf->agf_btreeblks));
1397 #endif
1398 }
1399
1400 #ifdef XR_BLD_FREE_TRACE
1401 fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
1402 be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
1403 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
1404 XFS_BTNUM_BNO,
1405 XFS_BTNUM_CNT);
1406 #endif
1407
1408 if (xfs_sb_version_hascrc(&mp->m_sb))
1409 platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
1410
1411 /* initialise the AGFL, then fill it if there are blocks left over. */
1412 agfl_buf = libxfs_getbuf(mp->m_dev,
1413 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
1414 mp->m_sb.sb_sectsize/BBSIZE);
1415 agfl_buf->b_ops = &xfs_agfl_buf_ops;
1416 agfl = XFS_BUF_TO_AGFL(agfl_buf);
1417
1418 /* setting to 0xff results in initialisation to NULLAGBLOCK */
1419 memset(agfl, 0xff, mp->m_sb.sb_sectsize);
1420 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1421 agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
1422 agfl->agfl_seqno = cpu_to_be32(agno);
1423 platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
1424 for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
1425 agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
1426 }
1427 freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);
1428
1429 /*
1430 * do we have left-over blocks in the btree cursors that should
1431 * be used to fill the AGFL?
1432 */
1433 if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
1434 /*
1435 * yes, now grab as many blocks as we can
1436 */
1437 i = j = 0;
1438 while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
1439 freelist[i] = cpu_to_be32(
1440 get_next_blockaddr(agno, 0, bno_bt));
1441 i++;
1442 }
1443
1444 while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
1445 freelist[i] = cpu_to_be32(
1446 get_next_blockaddr(agno, 0, bcnt_bt));
1447 i++;
1448 }
1449 /*
1450 * now throw the rest of the blocks away and complain
1451 */
1452 while (bno_bt->num_free_blocks > 0) {
1453 (void) get_next_blockaddr(agno, 0, bno_bt);
1454 j++;
1455 }
1456 while (bcnt_bt->num_free_blocks > 0) {
1457 (void) get_next_blockaddr(agno, 0, bcnt_bt);
1458 j++;
1459 }
1460
1461 if (j > 0) {
1462 if (j == lostblocks)
1463 do_warn(_("lost %d blocks in ag %u\n"),
1464 j, agno);
1465 else
1466 do_warn(_("thought we were going to lose %d "
1467 "blocks in ag %u, actually lost "
1468 "%d\n"),
1469 lostblocks, j, agno);
1470 }
1471
1472 agf->agf_flfirst = 0;
1473 agf->agf_fllast = cpu_to_be32(i - 1);
1474 agf->agf_flcount = cpu_to_be32(i);
1475
1476 #ifdef XR_BLD_FREE_TRACE
1477 fprintf(stderr, "writing agfl for ag %u\n", agno);
1478 #endif
1479
1480 } else {
1481 agf->agf_flfirst = 0;
1482 agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
1483 agf->agf_flcount = 0;
1484 }
1485
1486 libxfs_writebuf(agfl_buf, 0);
1487
1488 ext_ptr = findbiggest_bcnt_extent(agno);
1489 agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
1490 ext_ptr->ex_blockcount : 0);
1491
1492 ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
1493 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
1494
1495 libxfs_writebuf(agf_buf, 0);
1496
1497 /*
1498 * now fix up the free list appropriately
1499 * XXX: code lifted from mkfs, should be shared.
1500 */
1501 {
1502 xfs_alloc_arg_t args;
1503 xfs_trans_t *tp;
1504 struct xfs_trans_res tres = {0};
1505 int error;
1506
1507 memset(&args, 0, sizeof(args));
1508 args.pag = xfs_perag_get(mp,agno);
1509 libxfs_trans_alloc(mp, &tres,
1510 xfs_alloc_min_freelist(mp, args.pag), 0, 0, &tp);
1511 args.tp = tp;
1512 args.mp = mp;
1513 args.agno = agno;
1514 args.alignment = 1;
1515 error = libxfs_alloc_fix_freelist(&args, 0);
1516 xfs_perag_put(args.pag);
1517 if (error) {
1518 do_error(_("failed to fix AGFL on AG %d, error %d\n"),
1519 agno, error);
1520 }
1521 libxfs_trans_commit(tp);
1522 }
1523
1524 #ifdef XR_BLD_FREE_TRACE
1525 fprintf(stderr, "wrote agf for ag %u\n", agno);
1526 #endif
1527 }
1528
1529 /*
1530 * update the superblock counters, sync the sb version numbers and
1531 * feature bits to the filesystem, and sync up the on-disk superblock
1532 * to match the incore superblock.
1533 */
1534 static void
1535 sync_sb(xfs_mount_t *mp)
1536 {
1537 xfs_buf_t *bp;
1538
1539 bp = libxfs_getsb(mp, 0);
1540 if (!bp)
1541 do_error(_("couldn't get superblock\n"));
1542
1543 mp->m_sb.sb_icount = sb_icount;
1544 mp->m_sb.sb_ifree = sb_ifree;
1545 mp->m_sb.sb_fdblocks = sb_fdblocks;
1546 mp->m_sb.sb_frextents = sb_frextents;
1547
1548 update_sb_version(mp);
1549
1550 libxfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
1551 libxfs_writebuf(bp, 0);
1552 }
1553
1554 /*
1555 * make sure the root and realtime inodes show up allocated
1556 * even if they've been freed. they get reinitialized in phase6.
1557 */
1558 static void
1559 keep_fsinos(xfs_mount_t *mp)
1560 {
1561 ino_tree_node_t *irec;
1562 int i;
1563
1564 irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
1565 XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
1566
1567 for (i = 0; i < 3; i++)
1568 set_inode_used(irec, i);
1569 }
1570
/*
 * Rebuild all on-disk metadata for a single allocation group:
 * incore freespace trees first, then the on-disk bno/bcnt freespace
 * btrees, AGF + AGFL, the inobt (and finobt if enabled), and finally
 * the AGI.  Also accumulates per-AG icount/ifree/fdblocks for later
 * aggregation into the superblock.  Ordering matters: the freespace
 * trees must be written before build_agf_agfl(), and the inode trees
 * before build_agi().
 */
static void
phase5_func(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno)
{
	__uint64_t	num_inos;
	__uint64_t	num_free_inos;
	__uint64_t	finobt_num_inos;
	__uint64_t	finobt_num_free_inos;
	bt_status_t	bno_btree_curs;
	bt_status_t	bcnt_btree_curs;
	bt_status_t	ino_btree_curs;
	bt_status_t	fino_btree_curs;
	int		extra_blocks = 0;
	uint		num_freeblocks;
	xfs_extlen_t	freeblks1;
#ifdef DEBUG
	xfs_extlen_t	freeblks2;
#endif
	xfs_agblock_t	num_extents;
	__uint32_t	magic;
	struct agi_stat	agi_stat = {0,};

	if (verbose)
		do_log(_(" - agno = %d\n"), agno);

	/* NOTE(review): bare block looks like a leftover scoping block
	 * from an earlier threaded version — kept as-is */
	{
		/*
		 * build up incore bno and bcnt extent btrees
		 */
		num_extents = mk_incore_fstree(mp, agno);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "# of bno extents is %d\n",
				count_bno_extents(agno));
#endif

		if (num_extents == 0)  {
			/*
			 * XXX - what we probably should do here is pick an
			 * inode for a regular file in the allocation group
			 * that has space allocated and shoot it by traversing
			 * the bmap list and putting all its extents on the
			 * incore freespace trees, clearing the inode,
			 * and clearing the in-use bit in the incore inode
			 * tree.  Then try mk_incore_fstree() again.
			 */
			do_error(_("unable to rebuild AG %u.  "
				"Not enough free space in on-disk AG.\n"),
				agno);
		}

		/*
		 * ok, now set up the btree cursors for the
		 * on-disk btrees (includes pre-allocating all
		 * required blocks for the trees themselves)
		 */
		init_ino_cursor(mp, agno, &ino_btree_curs, &num_inos,
				&num_free_inos, 0);

		if (xfs_sb_version_hasfinobt(&mp->m_sb))
			init_ino_cursor(mp, agno, &fino_btree_curs,
					&finobt_num_inos, &finobt_num_free_inos,
					1);

		sb_icount_ag[agno] += num_inos;
		sb_ifree_ag[agno] += num_free_inos;

		num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
		/*
		 * lose two blocks per AG -- the space tree roots
		 * are counted as allocated since the space trees
		 * always have roots
		 */
		sb_fdblocks_ag[agno] += num_freeblocks - 2;

		if (num_extents == 0)  {
			/*
			 * XXX - what we probably should do here is pick an
			 * inode for a regular file in the allocation group
			 * that has space allocated and shoot it by traversing
			 * the bmap list and putting all its extents on the
			 * incore freespace trees, clearing the inode,
			 * and clearing the in-use bit in the incore inode
			 * tree.  Then try mk_incore_fstree() again.
			 */
			do_error(
			_("unable to rebuild AG %u.  No free space.\n"), agno);
		}

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "# of bno extents is %d\n", num_extents);
#endif

		/*
		 * track blocks that we might really lose
		 */
		extra_blocks = calculate_freespace_cursor(mp, agno,
					&num_extents, &bno_btree_curs);

		/*
		 * freespace btrees live in the "free space" but
		 * the filesystem treats AGFL blocks as allocated
		 * since they aren't described by the freespace trees
		 */

		/*
		 * see if we can fit all the extra blocks into the AGFL
		 */
		extra_blocks = (extra_blocks - XFS_AGFL_SIZE(mp) > 0)
				? extra_blocks - XFS_AGFL_SIZE(mp)
				: 0;

		if (extra_blocks > 0)  {
			do_warn(_("lost %d blocks in agno %d, sorry.\n"),
				extra_blocks, agno);
			sb_fdblocks_ag[agno] -= extra_blocks;
		}

		/*
		 * the bcnt tree is sized identically to the bno tree,
		 * so start it from a copy of the bno cursor
		 */
		bcnt_btree_curs = bno_btree_curs;

		setup_cursor(mp, agno, &bno_btree_curs);
		setup_cursor(mp, agno, &bcnt_btree_curs);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "# of bno extents is %d\n",
				count_bno_extents(agno));
		fprintf(stderr, "# of bcnt extents is %d\n",
				count_bcnt_extents(agno));
#endif

		/*
		 * now rebuild the freespace trees
		 */
		freeblks1 = build_freespace_tree(mp, agno,
					&bno_btree_curs, XFS_ABTB_MAGIC);
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "# of free blocks == %d\n", freeblks1);
#endif
		write_cursor(&bno_btree_curs);

#ifdef DEBUG
		freeblks2 = build_freespace_tree(mp, agno,
					&bcnt_btree_curs, XFS_ABTC_MAGIC);
#else
		(void) build_freespace_tree(mp, agno,
					&bcnt_btree_curs, XFS_ABTC_MAGIC);
#endif
		write_cursor(&bcnt_btree_curs);

		/* no-op unless DEBUG, where freeblks2 is defined */
		ASSERT(freeblks1 == freeblks2);

		/*
		 * set up agf and agfl
		 */
		build_agf_agfl(mp, agno, &bno_btree_curs,
				&bcnt_btree_curs, freeblks1, extra_blocks);
		/*
		 * build inode allocation tree.
		 */
		magic = xfs_sb_version_hascrc(&mp->m_sb) ?
				XFS_IBT_CRC_MAGIC : XFS_IBT_MAGIC;
		build_ino_tree(mp, agno, &ino_btree_curs, magic, &agi_stat, 0);
		write_cursor(&ino_btree_curs);

		/*
		 * build free inode tree
		 */
		if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
			magic = xfs_sb_version_hascrc(&mp->m_sb) ?
					XFS_FIBT_CRC_MAGIC : XFS_FIBT_MAGIC;
			build_ino_tree(mp, agno, &fino_btree_curs, magic,
					NULL, 1);
			write_cursor(&fino_btree_curs);
		}

		/*
		 * build the agi (fino_btree_curs is uninitialized without
		 * finobt support, but build_agi only reads it in that case)
		 */
		build_agi(mp, agno, &ino_btree_curs, &fino_btree_curs,
			  &agi_stat);

		/*
		 * tear down cursors
		 */
		finish_cursor(&bno_btree_curs);
		finish_cursor(&ino_btree_curs);
		if (xfs_sb_version_hasfinobt(&mp->m_sb))
			finish_cursor(&fino_btree_curs);
		finish_cursor(&bcnt_btree_curs);
		/*
		 * release the incore per-AG bno/bcnt trees so
		 * the extent nodes can be recycled
		 */
		release_agbno_extent_tree(agno);
		release_agbcnt_extent_tree(agno);
	}
	PROG_RPT_INC(prog_rpt_done[agno], 1);
}
1768
1769 void
1770 phase5(xfs_mount_t *mp)
1771 {
1772 xfs_agnumber_t agno;
1773
1774 do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
1775 set_progress_msg(PROG_FMT_REBUILD_AG, (__uint64_t )glob_agcount);
1776
1777 #ifdef XR_BLD_FREE_TRACE
1778 fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
1779 xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
1780 xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
1781 fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1782 xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
1783 xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
1784 fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
1785 XR_INOBT_BLOCK_MAXRECS(mp, 0));
1786 fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
1787 XR_INOBT_BLOCK_MAXRECS(mp, 1));
1788 fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
1789 xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
1790 xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
1791 fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1792 xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
1793 xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
1794 #endif
1795 /*
1796 * make sure the root and realtime inodes show up allocated
1797 */
1798 keep_fsinos(mp);
1799
1800 /* allocate per ag counters */
1801 sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
1802 if (sb_icount_ag == NULL)
1803 do_error(_("cannot alloc sb_icount_ag buffers\n"));
1804
1805 sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
1806 if (sb_ifree_ag == NULL)
1807 do_error(_("cannot alloc sb_ifree_ag buffers\n"));
1808
1809 sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
1810 if (sb_fdblocks_ag == NULL)
1811 do_error(_("cannot alloc sb_fdblocks_ag buffers\n"));
1812
1813 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++)
1814 phase5_func(mp, agno);
1815
1816 print_final_rpt();
1817
1818 /* aggregate per ag counters */
1819 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
1820 sb_icount += sb_icount_ag[agno];
1821 sb_ifree += sb_ifree_ag[agno];
1822 sb_fdblocks += sb_fdblocks_ag[agno];
1823 }
1824 free(sb_icount_ag);
1825 free(sb_ifree_ag);
1826 free(sb_fdblocks_ag);
1827
1828 if (mp->m_sb.sb_rblocks) {
1829 do_log(
1830 _(" - generate realtime summary info and bitmap...\n"));
1831 rtinit(mp);
1832 generate_rtinfo(mp, btmcompute, sumcompute);
1833 }
1834
1835 do_log(_(" - reset superblock...\n"));
1836
1837 /*
1838 * sync superblock counter and set version bits correctly
1839 */
1840 sync_sb(mp);
1841
1842 bad_ino_btree = 0;
1843
1844 }