/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
#include "incore.h"
#include "protos.h"
#include "err_protos.h"
#include "dinode.h"
#include "rt.h"
#include "versions.h"
#include "threads.h"
#include "progress.h"
#include "slab.h"
#include "rmap.h"

/*
 * we maintain the current slice (path from root to leaf)
 * of the btree incore.  when we need a new block, we ask
 * the block allocator for the address of a block on that
 * level, map the block in, and set up the appropriate
 * pointers (child, sibling, etc.) and keys that should
 * point to the new block.
 */
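/*
 * Illustrative picture (added for exposition, not from the original
 * source): during the bulk load only the right-most block of each
 * level is open for appends; everything to its left has been filled
 * and written out already:
 *
 *	level 2:  [ root, being filled .......................... ]
 *	level 1:  [ done ][ done ][ open, being filled ........... ]
 *	level 0:  [ done ][ done ][ done ][ open, being filled ... ]
 *
 * When the open block of a level fills up, it is written, a new block
 * is chained in through the sibling pointers, and the key of the new
 * block's first record is propagated into the open block one level
 * up -- which may ripple all the way to the root.
 */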
typedef struct bt_stat_level  {
	/*
	 * set in setup_cursor routine and maintained in the tree-building
	 * routines
	 */
	xfs_buf_t		*buf_p;		/* 2 buffer pointers to ... */
	xfs_buf_t		*prev_buf_p;
	xfs_agblock_t		agbno;		/* current block being filled */
	xfs_agblock_t		prev_agbno;	/* previous block */
	/*
	 * set in calculate/init cursor routines for each btree level
	 */
	int			num_recs_tot;	/* # tree recs in level */
	int			num_blocks;	/* # tree blocks in level */
	int			num_recs_pb;	/* num_recs_tot / num_blocks */
	int			modulo;		/* num_recs_tot % num_blocks */
} bt_stat_level_t;
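
/*
 * Sketch (added for exposition, not part of the original code): how
 * the num_recs_pb/modulo pair spreads records evenly across a level.
 * The builders below compute bb_numrecs = num_recs_pb + (modulo > 0)
 * and decrement modulo once per block, so the first "modulo" blocks
 * each carry one extra record.  E.g. 1000 records over 7 blocks
 * yields 6 blocks of 143 records followed by 1 block of 142.
 */
static inline int
example_recs_in_block(int num_recs_tot, int num_blocks, int block_idx)
{
	int	num_recs_pb = num_recs_tot / num_blocks;
	int	modulo = num_recs_tot % num_blocks;

	return num_recs_pb + (block_idx < modulo ? 1 : 0);
}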

typedef struct bt_status  {
	int			init;		/* cursor set up once? */
	int			num_levels;	/* # of levels in btree */
	xfs_extlen_t		num_tot_blocks;	/* # blocks alloc'ed for tree */
	xfs_extlen_t		num_free_blocks;/* # blocks currently unused */

	xfs_agblock_t		root;		/* root block */
	/*
	 * list of blocks to be used to set up this tree
	 * and pointer to the first unused block on the list
	 */
	xfs_agblock_t		*btree_blocks;		/* block list */
	xfs_agblock_t		*free_btree_blocks;	/* first unused block */
	/*
	 * per-level status info
	 */
	bt_stat_level_t		level[XFS_BTREE_MAXLEVELS];
	uint64_t		owner;		/* owner */
} bt_status_t;

/*
 * extra metadata for the agi
 */
struct agi_stat {
	xfs_agino_t		first_agino;
	xfs_agino_t		count;
	xfs_agino_t		freecount;
};

static uint64_t	*sb_icount_ag;		/* allocated inodes per ag */
static uint64_t	*sb_ifree_ag;		/* free inodes per ag */
static uint64_t	*sb_fdblocks_ag;	/* free data blocks per ag */

static int
mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	int			in_extent;
	int			num_extents;
	xfs_agblock_t		extent_start;
	xfs_extlen_t		extent_len;
	xfs_agblock_t		agbno;
	xfs_agblock_t		ag_end;
	uint			free_blocks;
	xfs_extlen_t		blen;
	int			bstate;

	/*
	 * scan the bitmap for the ag looking for contiguous
	 * extents of free blocks.  At this point, we know
	 * that blocks in the bitmap are either set to an
	 * "in use" state or set to unknown (0) since the
	 * bmaps were zero'ed in phase 4 and only blocks
	 * being used by inodes, inode bmaps, ag headers,
	 * and the files themselves were put into the bitmap.
	 *
	 */
	ASSERT(agno < mp->m_sb.sb_agcount);

	extent_start = extent_len = 0;
	in_extent = 0;
	num_extents = free_blocks = 0;

	if (agno < mp->m_sb.sb_agcount - 1)
		ag_end = mp->m_sb.sb_agblocks;
	else
		ag_end = mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t)mp->m_sb.sb_agblocks *
			(mp->m_sb.sb_agcount - 1);

	/*
	 * ok, now find the number of extents, keep track of the
	 * largest extent.
	 */
	for (agbno = 0; agbno < ag_end; agbno += blen) {
		bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
		if (bstate < XR_E_INUSE)  {
			free_blocks += blen;
			if (in_extent == 0)  {
				/*
				 * found the start of a free extent
				 */
				in_extent = 1;
				num_extents++;
				extent_start = agbno;
				extent_len = blen;
			} else  {
				extent_len += blen;
			}
		} else  {
			if (in_extent)  {
				/*
				 * free extent ends here, add extent to the
				 * 2 incore extent (avl-to-be-B+) trees
				 */
				in_extent = 0;
#if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
				fprintf(stderr, "adding extent %u [%u %u]\n",
					agno, extent_start, extent_len);
#endif
				add_bno_extent(agno, extent_start, extent_len);
				add_bcnt_extent(agno, extent_start, extent_len);
			}
		}
	}
	if (in_extent)  {
		/*
		 * free extent ends here
		 */
#if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
		fprintf(stderr, "adding extent %u [%u %u]\n",
			agno, extent_start, extent_len);
#endif
		add_bno_extent(agno, extent_start, extent_len);
		add_bcnt_extent(agno, extent_start, extent_len);
	}

	return(num_extents);
}

static xfs_agblock_t
get_next_blockaddr(xfs_agnumber_t agno, int level, bt_status_t *curs)
{
	ASSERT(curs->free_btree_blocks < curs->btree_blocks +
						curs->num_tot_blocks);
	ASSERT(curs->num_free_blocks > 0);

	curs->num_free_blocks--;
	return(*curs->free_btree_blocks++);
}

/*
 * set up the dynamically allocated block allocation data in the btree
 * cursor that depends on the info in the static portion of the cursor.
 * allocates space from the incore bno/bcnt extent trees and sets up
 * the first path up the left side of the tree.  Also sets up the
 * cursor pointer to the btree root.   called by init_freespace_cursor()
 * and init_ino_cursor()
 */
static void
setup_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *curs)
{
	int			j;
	unsigned int		u;
	xfs_extlen_t		big_extent_len;
	xfs_agblock_t		big_extent_start;
	extent_tree_node_t	*ext_ptr;
	extent_tree_node_t	*bno_ext_ptr;
	xfs_extlen_t		blocks_allocated;
	xfs_agblock_t		*agb_ptr;
	int			error;

	/*
	 * get the number of blocks we need to allocate, then
	 * set up block number array, set the free block pointer
	 * to the first block in the array, and null the array
	 */
	big_extent_len = curs->num_tot_blocks;
	blocks_allocated = 0;

	ASSERT(big_extent_len > 0);

	if ((curs->btree_blocks = malloc(sizeof(xfs_agblock_t)
					* big_extent_len)) == NULL)
		do_error(_("could not set up btree block array\n"));

	agb_ptr = curs->free_btree_blocks = curs->btree_blocks;

	for (j = 0; j < curs->num_free_blocks; j++, agb_ptr++)
		*agb_ptr = NULLAGBLOCK;

	/*
	 * grab the smallest extent and use it up, then get the
	 * next smallest.  This mimics the init_*_cursor code.
	 */
	ext_ptr = findfirst_bcnt_extent(agno);

	agb_ptr = curs->btree_blocks;

	/*
	 * set up the free block array
	 */
	while (blocks_allocated < big_extent_len)  {
		if (!ext_ptr)
			do_error(
_("error - not enough free space in filesystem\n"));
		/*
		 * use up the extent we've got
		 */
		for (u = 0; u < ext_ptr->ex_blockcount &&
				blocks_allocated < big_extent_len; u++)  {
			ASSERT(agb_ptr < curs->btree_blocks
					+ curs->num_tot_blocks);
			*agb_ptr++ = ext_ptr->ex_startblock + u;
			blocks_allocated++;
		}

		error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, u,
				curs->owner);
		if (error)
			do_error(_("could not set up btree rmaps: %s\n"),
				strerror(-error));

		/*
		 * if we only used part of this last extent, then we
		 * need only to reset the extent in the extent
		 * trees and we're done
		 */
		if (u < ext_ptr->ex_blockcount)  {
			big_extent_start = ext_ptr->ex_startblock + u;
			big_extent_len = ext_ptr->ex_blockcount - u;

			ASSERT(big_extent_len > 0);

			bno_ext_ptr = find_bno_extent(agno,
						ext_ptr->ex_startblock);
			ASSERT(bno_ext_ptr != NULL);
			get_bno_extent(agno, bno_ext_ptr);
			release_extent_tree_node(bno_ext_ptr);

			ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
			release_extent_tree_node(ext_ptr);
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr, "releasing extent: %u [%u %u]\n",
				agno, ext_ptr->ex_startblock,
				ext_ptr->ex_blockcount);
			fprintf(stderr, "blocks_allocated = %d\n",
				blocks_allocated);
#endif

			add_bno_extent(agno, big_extent_start, big_extent_len);
			add_bcnt_extent(agno, big_extent_start, big_extent_len);

			return;
		}
		/*
		 * delete the used-up extent from both extent trees and
		 * find next biggest extent
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "releasing extent: %u [%u %u]\n",
			agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif
		bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
		ASSERT(bno_ext_ptr != NULL);
		get_bno_extent(agno, bno_ext_ptr);
		release_extent_tree_node(bno_ext_ptr);

		ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
				ext_ptr->ex_blockcount);
		ASSERT(ext_ptr != NULL);
		release_extent_tree_node(ext_ptr);

		ext_ptr = findfirst_bcnt_extent(agno);
	}
#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "blocks_allocated = %d\n",
		blocks_allocated);
#endif
}

static void
write_cursor(bt_status_t *curs)
{
	int i;

	for (i = 0; i < curs->num_levels; i++)  {
#if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
		fprintf(stderr, "writing bt block %u\n", curs->level[i].agbno);
#endif
		if (curs->level[i].prev_buf_p != NULL)  {
			ASSERT(curs->level[i].prev_agbno != NULLAGBLOCK);
#if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
			fprintf(stderr, "writing bt prev block %u\n",
						curs->level[i].prev_agbno);
#endif
			libxfs_writebuf(curs->level[i].prev_buf_p, 0);
		}
		libxfs_writebuf(curs->level[i].buf_p, 0);
	}
}

static void
finish_cursor(bt_status_t *curs)
{
	ASSERT(curs->num_free_blocks == 0);
	free(curs->btree_blocks);
}

/*
 * We need to leave some free records in the tree for the corner case of
 * setting up the AGFL. This may require allocation of blocks, and as
 * such can require insertion of new records into the tree (e.g. moving
 * a record in the by-count tree when a long extent is shortened). If we
 * pack the records into the leaves with no slack space, this requires a
 * leaf split to occur and a block to be allocated from the free list.
 * If we don't have any blocks on the free list (because we are setting
 * it up!), then we fail, and the filesystem will fail with the same
 * failure at runtime. Hence leave a couple of records slack space in
 * each block to allow immediate modification of the tree without
 * requiring splits to be done.
 *
 * XXX(hch): any reason we don't just look at mp->m_alloc_mxr?
 */
#define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
	(libxfs_allocbt_maxrecs((mp), (mp)->m_sb.sb_blocksize, (level) == 0) \
			- 2)
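
/*
 * Worked example (illustrative figures, assuming a V4 filesystem with
 * 4096-byte blocks; exact numbers depend on the filesystem under
 * repair): the short btree block header takes 16 bytes and each
 * freespace record (startblock, blockcount) takes 8, so
 * libxfs_allocbt_maxrecs() gives (4096 - 16) / 8 = 510 leaf records
 * and the macro above packs at most 508 per leaf, keeping 2 slots of
 * slack for the AGFL setup described above.
 */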

/*
 * this calculates a freespace cursor for an ag.
 * btree_curs is an in/out.  returns the number of
 * blocks that will show up in the AGFL.
 */
static int
calculate_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t *extents, bt_status_t *btree_curs)
{
	xfs_extlen_t		blocks_needed;		/* a running count */
	xfs_extlen_t		blocks_allocated_pt;	/* per tree */
	xfs_extlen_t		blocks_allocated_total;	/* for both trees */
	xfs_agblock_t		num_extents;
	int			i;
	int			extents_used;
	int			extra_blocks;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	extent_tree_node_t	*ext_ptr;
	int			level;

	num_extents = *extents;
	extents_used = 0;

	ASSERT(num_extents != 0);

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;

	/*
	 * figure out how much space we need for the leaf level
	 * of the tree and set up the cursor for the leaf level
	 * (note that the same code is duplicated further down)
	 */
	lptr->num_blocks = howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0));
	lptr->num_recs_pb = num_extents / lptr->num_blocks;
	lptr->modulo = num_extents % lptr->num_blocks;
	lptr->num_recs_tot = num_extents;
	level = 1;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "%s 0 %d %d %d %d\n", __func__,
			lptr->num_blocks,
			lptr->num_recs_pb,
			lptr->modulo,
			lptr->num_recs_tot);
#endif
	/*
	 * if we need more levels, set them up.  # of records
	 * per level is the # of blocks in the level below it
	 */
	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level - 1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks
					% lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr, "%s %d %d %d %d %d\n", __func__,
					level,
					lptr->num_blocks,
					lptr->num_recs_pb,
					lptr->modulo,
					lptr->num_recs_tot);
#endif
		}
	}

	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	/*
	 * ok, now we have a hypothetical cursor that
	 * will work for both the bno and bcnt trees.
	 * now figure out if using up blocks to set up the
	 * trees will perturb the shape of the freespace tree.
	 * if so, we've over-allocated.  the freespace trees
	 * as they will be *after* accounting for the free space
	 * we've used up will need fewer blocks to represent
	 * than we've allocated.  We can use the AGFL to hold
	 * XFS_AGFL_SIZE (sector/xfs_agfl_t) blocks but that's it.
	 * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
	 * if the number of extra blocks is more than that,
	 * we'll have to be called again.
	 */
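	/*
	 * Numeric illustration (added, not from the original source):
	 * suppose each tree needs 4 blocks, so 8 blocks are allocated
	 * in total.  Carving those 8 blocks out of the free space may
	 * consume whole small extents, and the recount below may find
	 * that 3 leaf blocks per tree now suffice; the 2 surplus
	 * blocks then become AGFL candidates rather than forcing
	 * another iteration.
	 */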
	for (blocks_needed = 0, i = 0; i < level; i++)  {
		blocks_needed += btree_curs->level[i].num_blocks;
	}

	/*
	 * record the # of blocks we've allocated
	 */
	blocks_allocated_pt = blocks_needed;
	blocks_needed *= 2;
	blocks_allocated_total = blocks_needed;

	/*
	 * figure out how many free extents will be used up by
	 * our space allocation
	 */
	if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
		do_error(_("can't rebuild fs trees -- not enough free space "
			   "on ag %u\n"), agno);

	while (ext_ptr != NULL && blocks_needed > 0)  {
		if (ext_ptr->ex_blockcount <= blocks_needed)  {
			blocks_needed -= ext_ptr->ex_blockcount;
			extents_used++;
		} else  {
			blocks_needed = 0;
		}

		ext_ptr = findnext_bcnt_extent(agno, ext_ptr);

#ifdef XR_BLD_FREE_TRACE
		if (ext_ptr != NULL)  {
			fprintf(stderr, "got next extent [%u %u]\n",
				ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
		} else  {
			fprintf(stderr, "out of extents\n");
		}
#endif
	}
	if (blocks_needed > 0)
		do_error(_("ag %u - not enough free space to build freespace "
			   "btrees\n"), agno);

	ASSERT(num_extents >= extents_used);

	num_extents -= extents_used;

	/*
	 * see if the number of leaf blocks will change as a result
	 * of the number of extents changing
	 */
	if (howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0))
			!= btree_curs->level[0].num_blocks)  {
		/*
		 * yes -- recalculate the cursor.  If the number of
		 * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
		 * we can put those into the AGFL.  we don't try
		 * and get things to converge exactly (reach a
		 * state with zero excess blocks) because there
		 * exist pathological cases which will never
		 * converge.  first, check for the zero-case.
		 */
		if (num_extents == 0)  {
			/*
			 * ok, we've used up all the free blocks
			 * trying to lay out the leaf level.  go
			 * to a one block (empty) btree and put the
			 * already allocated blocks into the AGFL
			 */
			if (btree_curs->level[0].num_blocks != 1)  {
				/*
				 * we really needed more blocks because
				 * the old tree had more than one level.
				 * this is bad.
				 */
				do_warn(_("not enough free blocks left to "
					  "describe all free blocks in AG "
					  "%u\n"), agno);
			}
#ifdef XR_BLD_FREE_TRACE
			fprintf(stderr,
				"ag %u -- no free extents, alloc'ed %d\n",
				agno, blocks_allocated_pt);
#endif
			lptr->num_blocks = 1;
			lptr->modulo = 0;
			lptr->num_recs_pb = 0;
			lptr->num_recs_tot = 0;

			btree_curs->num_levels = 1;

			/*
			 * don't reset the allocation stats, assume
			 * they're all extra blocks
			 * don't forget to return the total block count
			 * not the per-tree block count.  these are the
			 * extras that will go into the AGFL.  subtract
			 * two for the root blocks.
			 */
			btree_curs->num_tot_blocks = blocks_allocated_pt;
			btree_curs->num_free_blocks = blocks_allocated_pt;

			*extents = 0;

			return(blocks_allocated_total - 2);
		}

		lptr = &btree_curs->level[0];
		lptr->num_blocks = howmany(num_extents,
					XR_ALLOC_BLOCK_MAXRECS(mp, 0));
		lptr->num_recs_pb = num_extents / lptr->num_blocks;
		lptr->modulo = num_extents % lptr->num_blocks;
		lptr->num_recs_tot = num_extents;
		level = 1;

		/*
		 * if we need more levels, set them up
		 */
		if (lptr->num_blocks > 1)  {
			for (level = 1; btree_curs->level[level-1].num_blocks
					> 1 && level < XFS_BTREE_MAXLEVELS;
					level++)  {
				lptr = &btree_curs->level[level];
				p_lptr = &btree_curs->level[level-1];
				lptr->num_blocks = howmany(p_lptr->num_blocks,
					XR_ALLOC_BLOCK_MAXRECS(mp, level));
				lptr->modulo = p_lptr->num_blocks
						% lptr->num_blocks;
				lptr->num_recs_pb = p_lptr->num_blocks
						/ lptr->num_blocks;
				lptr->num_recs_tot = p_lptr->num_blocks;
			}
		}
		ASSERT(lptr->num_blocks == 1);
		btree_curs->num_levels = level;

		/*
		 * now figure out the number of excess blocks
		 */
		for (blocks_needed = 0, i = 0; i < level; i++)  {
			blocks_needed += btree_curs->level[i].num_blocks;
		}
		blocks_needed *= 2;

		ASSERT(blocks_allocated_total >= blocks_needed);
		extra_blocks = blocks_allocated_total - blocks_needed;
	} else {
		if (extents_used > 0) {
			/*
			 * reset the leaf level geometry to account
			 * for consumed extents.  we can leave the
			 * rest of the cursor alone since the number
			 * of leaf blocks hasn't changed.
			 */
			lptr = &btree_curs->level[0];

			lptr->num_recs_pb = num_extents / lptr->num_blocks;
			lptr->modulo = num_extents % lptr->num_blocks;
			lptr->num_recs_tot = num_extents;
		}

		extra_blocks = 0;
	}

	btree_curs->num_tot_blocks = blocks_allocated_pt;
	btree_curs->num_free_blocks = blocks_allocated_pt;

	*extents = num_extents;

	return(extra_blocks);
}

static void
prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_agblock_t startblock,
		xfs_extlen_t blockcount, int level, xfs_btnum_t btnum)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_key_t		*bt_key;
	xfs_alloc_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * only happens once when initializing the
		 * left-hand side of the tree.
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, btnum);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, " %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK) {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, level,
					0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, btnum);
	}
	/*
	 * add extent info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_alloc_mxr[1]);

	bt_key->ar_startblock = cpu_to_be32(startblock);
	bt_key->ar_blockcount = cpu_to_be32(blockcount);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

/*
 * rebuilds a freespace tree given a cursor and type
 * of tree to build (bno or bcnt).  returns the number of free blocks
 * represented by the tree.
 */
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_btnum_t btnum)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_rec_t		*bt_rec;
	int			level;
	xfs_agblock_t		agbno;
	extent_tree_node_t	*ext_ptr;
	bt_stat_level_t		*lptr;
	xfs_extlen_t		freeblks;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
#endif
	level = btree_curs->num_levels;
	freeblks = 0;

	ASSERT(level > 0);

	/*
	 * initialize the first block on each btree level
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
	}
	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_freespace_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (btnum == XFS_BTNUM_BNO)
		ext_ptr = findfirst_bno_extent(agno);
	else
		ext_ptr = findfirst_bcnt_extent(agno);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
		agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif

	lptr = &btree_curs->level[0];

	for (i = 0; i < btree_curs->level[0].num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "bft, bb_numrecs = %d\n",
				be16_to_cpu(bt_hdr->bb_numrecs));
#endif

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * initialize values in the path up to the root if
		 * this is a multi-level btree
		 */
		if (btree_curs->num_levels > 1)
			prop_freespace_cursor(mp, agno, btree_curs,
					ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount,
					0, btnum);

		bt_rec = (xfs_alloc_rec_t *)
			  ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ext_ptr != NULL);
			bt_rec[j].ar_startblock = cpu_to_be32(
							ext_ptr->ex_startblock);
			bt_rec[j].ar_blockcount = cpu_to_be32(
							ext_ptr->ex_blockcount);
			freeblks += ext_ptr->ex_blockcount;
			if (btnum == XFS_BTNUM_BNO)
				ext_ptr = findnext_bno_extent(ext_ptr);
			else
				ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
#if 0
#ifdef XR_BLD_FREE_TRACE
			if (ext_ptr == NULL)
				fprintf(stderr, "null extent pointer, j = %d\n",
					j);
			else
				fprintf(stderr,
				"bft, agno = %d, start = %u, count = %u\n",
					agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
#endif
#endif
		}

		if (ext_ptr != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_FREE_TRACE
				fprintf(stderr, " writing fst agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	return(freeblks);
}

/*
 * XXX(hch): any reason we don't just look at mp->m_inobt_mxr?
 */
#define XR_INOBT_BLOCK_MAXRECS(mp, level) \
			libxfs_inobt_maxrecs((mp), (mp)->m_sb.sb_blocksize, \
						(level) == 0)
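
/*
 * For scale (illustrative figures, assuming a V4 filesystem with
 * 4096-byte blocks): an on-disk inobt record (startino, freecount,
 * free mask) is 16 bytes and the short block header is 16 bytes, so
 * one leaf holds (4096 - 16) / 16 = 255 records, i.e. a single leaf
 * block can describe up to 255 * 64 = 16320 inodes.
 */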

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because inode tree building happens before
 * freespace tree building.
 */
static void
init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
		uint64_t *num_inos, uint64_t *num_free_inos, int finobt)
{
	uint64_t		ninos;
	uint64_t		nfinos;
	int			rec_nfinos;
	int			rec_ninos;
	ino_tree_node_t		*ino_rec;
	int			num_recs;
	int			level;
	bt_stat_level_t		*lptr;
	bt_stat_level_t		*p_lptr;
	xfs_extlen_t		blocks_allocated;
	int			i;

	*num_inos = *num_free_inos = 0;
	ninos = nfinos = 0;

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_INOBT;

	/*
	 * build up statistics
	 */
	ino_rec = findfirst_inode_rec(agno);
	for (num_recs = 0; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec))  {
		rec_ninos = 0;
		rec_nfinos = 0;
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			ASSERT(is_inode_confirmed(ino_rec, i));
			/*
			 * sparse inodes are not factored into superblock (free)
			 * inode counts
			 */
			if (is_inode_sparse(ino_rec, i))
				continue;
			if (is_inode_free(ino_rec, i))
				rec_nfinos++;
			rec_ninos++;
		}

		/*
		 * finobt only considers records with free inodes
		 */
		if (finobt && !rec_nfinos)
			continue;

		nfinos += rec_nfinos;
		ninos += rec_ninos;
		num_recs++;
	}

	if (num_recs == 0) {
		/*
		 * easy corner-case -- no inode records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	blocks_allocated = lptr->num_blocks = howmany(num_recs,
					XR_INOBT_BLOCK_MAXRECS(mp, 0));

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
				XR_INOBT_BLOCK_MAXRECS(mp, level));
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);

	*num_inos = ninos;
	*num_free_inos = nfinos;

	return;
}

static void
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
	xfs_agino_t startino, int level)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_key_t		*bt_key;
	xfs_inobt_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_INO,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}
	/*
	 * add inode info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_inobt_mxr[1]);

	bt_key->ir_startino = cpu_to_be32(startino);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

/*
 * XXX: yet more code that can be shared with mkfs, growfs.
 */
static void
build_agi(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
	  bt_status_t *finobt_curs, struct agi_stat *agi_stat)
{
	xfs_buf_t	*agi_buf;
	xfs_agi_t	*agi;
	int		i;

	agi_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agi_buf->b_ops = &xfs_agi_buf_ops;
	agi = XFS_BUF_TO_AGI(agi_buf);
	memset(agi, 0, mp->m_sb.sb_sectsize);

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(agno);
	if (agno < mp->m_sb.sb_agcount - 1)
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
	agi->agi_count = cpu_to_be32(agi_stat->count);
	agi->agi_root = cpu_to_be32(btree_curs->root);
	agi->agi_level = cpu_to_be32(btree_curs->num_levels);
	agi->agi_freecount = cpu_to_be32(agi_stat->freecount);
	agi->agi_newino = cpu_to_be32(agi_stat->first_agino);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
		agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(finobt_curs->root);
		agi->agi_free_level = cpu_to_be32(finobt_curs->num_levels);
	}

	libxfs_writebuf(agi_buf, 0);
}

/*
 * rebuilds an inode tree given a cursor.  We're lazy here and call
 * the routine that builds the agi
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_btnum_t btnum,
		struct agi_stat *agi_stat)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);

	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (btnum == XFS_BTNUM_FINO)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
						 (lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
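			/*
			 * Worked example (illustrative): with 64 inodes
			 * per chunk and 16 holemask bits,
			 * XFS_INODES_PER_HOLEMASK_BIT is 4, so each
			 * holemask bit stands for 4 inodes.  If the
			 * first 4 inodes of the chunk are sparse
			 * (ir_sparse = 0xf), only holemask bit 0 is
			 * set; the ASSERTs below check that a sparse
			 * region never covers a partial 4-inode group.
			 */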
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (btnum == XFS_BTNUM_FINO)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}

/* rebuild the rmap tree */

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because rmap tree building happens before
 * freespace tree building.
 */
static void
init_rmapbt_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	size_t			num_recs;
	int			level;
	struct bt_stat_level	*lptr;
	struct bt_stat_level	*p_lptr;
	xfs_extlen_t		blocks_allocated;
	int			maxrecs;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		memset(btree_curs, 0, sizeof(struct bt_status));
		return;
	}

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_AG;

	/*
	 * build up statistics
	 */
	num_recs = rmap_record_count(mp, agno);
	if (num_recs == 0) {
		/*
		 * easy corner-case -- no rmap records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	/*
	 * Leave enough slack in the rmapbt that we can insert the
	 * metadata AG entries without too many splits.
	 */
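	/*
	 * (Illustrative: if mp->m_rmap_mxr[0] were 200, packing only
	 * 190 records per leaf leaves room for roughly ten inserts in
	 * each block before a split would be forced.)
	 */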
	maxrecs = mp->m_rmap_mxr[0];
	if (num_recs > maxrecs)
		maxrecs -= 10;
	blocks_allocated = lptr->num_blocks = howmany(num_recs, maxrecs);

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
				mp->m_rmap_mxr[1]);
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);
}

static void
prop_rmap_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	struct xfs_rmap_irec	*rm_rec,
	int			level)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_key	*bt_key;
	xfs_rmap_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	struct bt_stat_level	*lptr;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_rmap_cursor(mp, agno, btree_curs, rm_rec, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " rmap prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_rmap_cursor(mp, agno, btree_curs, rm_rec, level);
	}
	/*
	 * add rmap info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_RMAP_KEY_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_RMAP_PTR_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_rmap_mxr[1]);

	bt_key->rm_startblock = cpu_to_be32(rm_rec->rm_startblock);
	bt_key->rm_owner = cpu_to_be64(rm_rec->rm_owner);
	bt_key->rm_offset = cpu_to_be64(rm_rec->rm_offset);

	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}

static void
prop_rmap_highkey(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	struct xfs_rmap_irec	*rm_highkey)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_key	*bt_key;
	struct bt_stat_level	*lptr;
	struct xfs_rmap_irec	key = {0};
	struct xfs_rmap_irec	high_key;
	int			level;
	int			i;
	int			numrecs;

	high_key = *rm_highkey;
	for (level = 1; level < btree_curs->num_levels; level++) {
		lptr = &btree_curs->level[level];
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		numrecs = be16_to_cpu(bt_hdr->bb_numrecs);
		bt_key = XFS_RMAP_HIGH_KEY_ADDR(bt_hdr, numrecs);

		bt_key->rm_startblock = cpu_to_be32(high_key.rm_startblock);
		bt_key->rm_owner = cpu_to_be64(high_key.rm_owner);
		bt_key->rm_offset = cpu_to_be64(
				libxfs_rmap_irec_offset_pack(&high_key));

		for (i = 1; i < numrecs - 1; i++) {
			bt_key = XFS_RMAP_HIGH_KEY_ADDR(bt_hdr, i);
			key.rm_startblock = be32_to_cpu(bt_key->rm_startblock);
			key.rm_owner = be64_to_cpu(bt_key->rm_owner);
			key.rm_offset = be64_to_cpu(bt_key->rm_offset);
			if (rmap_diffkeys(&key, &high_key) > 0)
				high_key = key;
		}
	}
}
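
/*
 * Note for readers (added for exposition, not from the original
 * source): the rmapbt is an overlapping-interval btree, so each node
 * entry carries a low key (installed by prop_rmap_cursor above) and a
 * separate high key.  E.g. if a leaf holds the records
 * [start 10, len 5] and [start 12, len 20], its parent's low key is
 * startblock 10 but its high key must reflect block 31, the largest
 * endpoint of any child record -- hence the re-scan of every level
 * for the greatest key in prop_rmap_highkey().
 */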

/*
 * rebuilds a rmap btree given a cursor.
 */
static void
build_rmap_tree(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	struct xfs_btree_block	*bt_hdr;
	struct xfs_rmap_irec	*rm_rec;
	struct xfs_slab_cursor	*rmap_cur;
	struct xfs_rmap_rec	*bt_rec;
	struct xfs_rmap_irec	highest_key = {0};
	struct xfs_rmap_irec	hi_key = {0};
	struct bt_stat_level	*lptr;
	int			numrecs;
	int			level = btree_curs->num_levels;
	int			error;

	highest_key.rm_flags = 0;
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_rmap_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	error = rmap_init_cursor(agno, &rmap_cur);
	if (error)
		do_error(
_("Insufficient memory to construct reverse-map cursor."));
	rm_rec = pop_slab_cursor(rmap_cur);
	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
		ASSERT(rm_rec != NULL || numrecs == 0);

		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
					0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(numrecs);

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0) {
			ASSERT(rm_rec != NULL);
			prop_rmap_cursor(mp, agno, btree_curs, rm_rec, 0);
		}

		bt_rec = (struct xfs_rmap_rec *)
			  ((char *)bt_hdr + XFS_RMAP_BLOCK_LEN);
		highest_key.rm_startblock = 0;
		highest_key.rm_owner = 0;
		highest_key.rm_offset = 0;
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(rm_rec != NULL);
			bt_rec[j].rm_startblock =
					cpu_to_be32(rm_rec->rm_startblock);
			bt_rec[j].rm_blockcount =
					cpu_to_be32(rm_rec->rm_blockcount);
			bt_rec[j].rm_owner = cpu_to_be64(rm_rec->rm_owner);
			bt_rec[j].rm_offset = cpu_to_be64(
					libxfs_rmap_irec_offset_pack(rm_rec));
			rmap_high_key_from_rec(rm_rec, &hi_key);
			if (rmap_diffkeys(&hi_key, &highest_key) > 0)
				highest_key = hi_key;

			rm_rec = pop_slab_cursor(rmap_cur);
		}

		/* Now go set the parent key */
		prop_rmap_highkey(mp, agno, btree_curs, &highest_key);

		if (rm_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_RL_TRACE
				fprintf(stderr, "writing rmapbt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}
	free_slab_cursor(&rmap_cur);
}

/* rebuild the refcount tree */

/*
 * we don't have to worry here about how chewing up free extents
 * may perturb things because reflink tree building happens before
 * freespace tree building.
 */
static void
init_refc_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs)
{
	size_t			num_recs;
	int			level;
	struct bt_stat_level	*lptr;
	struct bt_stat_level	*p_lptr;
	xfs_extlen_t		blocks_allocated;

	if (!xfs_sb_version_hasreflink(&mp->m_sb)) {
		memset(btree_curs, 0, sizeof(struct bt_status));
		return;
	}

	lptr = &btree_curs->level[0];
	btree_curs->init = 1;
	btree_curs->owner = XFS_RMAP_OWN_REFC;

	/*
	 * build up statistics
	 */
	num_recs = refcount_record_count(mp, agno);
	if (num_recs == 0) {
		/*
		 * easy corner-case -- no refcount records
		 */
		lptr->num_blocks = 1;
		lptr->modulo = 0;
		lptr->num_recs_pb = 0;
		lptr->num_recs_tot = 0;

		btree_curs->num_levels = 1;
		btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;

		setup_cursor(mp, agno, btree_curs);

		return;
	}

	blocks_allocated = lptr->num_blocks = howmany(num_recs,
			mp->m_refc_mxr[0]);

	lptr->modulo = num_recs % lptr->num_blocks;
	lptr->num_recs_pb = num_recs / lptr->num_blocks;
	lptr->num_recs_tot = num_recs;
	level = 1;

	if (lptr->num_blocks > 1)  {
		for (; btree_curs->level[level-1].num_blocks > 1
				&& level < XFS_BTREE_MAXLEVELS;
				level++)  {
			lptr = &btree_curs->level[level];
			p_lptr = &btree_curs->level[level - 1];
			lptr->num_blocks = howmany(p_lptr->num_blocks,
					mp->m_refc_mxr[1]);
			lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
			lptr->num_recs_pb = p_lptr->num_blocks
					/ lptr->num_blocks;
			lptr->num_recs_tot = p_lptr->num_blocks;

			blocks_allocated += lptr->num_blocks;
		}
	}
	ASSERT(lptr->num_blocks == 1);
	btree_curs->num_levels = level;

	btree_curs->num_tot_blocks = btree_curs->num_free_blocks
			= blocks_allocated;

	setup_cursor(mp, agno, btree_curs);
}

static void
prop_refc_cursor(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_status	*btree_curs,
	xfs_agblock_t		startbno,
	int			level)
{
	struct xfs_btree_block	*bt_hdr;
	struct xfs_refcount_key	*bt_key;
	xfs_refcount_ptr_t	*bt_ptr;
	xfs_agblock_t		agbno;
	struct bt_stat_level	*lptr;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_refc_cursor(mp, agno, btree_curs, startbno, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " refc prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
					level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_refc_cursor(mp, agno, btree_curs, startbno, level);
	}
	/*
	 * add refcount info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_REFCOUNT_KEY_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_REFCOUNT_PTR_ADDR(bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_refc_mxr[1]);

	bt_key->rc_startblock = cpu_to_be32(startbno);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}
1831
1832 /*
1833 * rebuilds a refcount btree given a cursor.
1834 */
1835 static void
1836 build_refcount_tree(
1837 struct xfs_mount *mp,
1838 xfs_agnumber_t agno,
1839 struct bt_status *btree_curs)
1840 {
1841 xfs_agnumber_t i;
1842 xfs_agblock_t j;
1843 xfs_agblock_t agbno;
1844 struct xfs_btree_block *bt_hdr;
1845 struct xfs_refcount_irec *refc_rec;
1846 struct xfs_slab_cursor *refc_cur;
1847 struct xfs_refcount_rec *bt_rec;
1848 struct bt_stat_level *lptr;
1849 int numrecs;
1850 int level = btree_curs->num_levels;
1851 int error;
1852
1853 for (i = 0; i < level; i++) {
1854 lptr = &btree_curs->level[i];
1855
1856 agbno = get_next_blockaddr(agno, i, btree_curs);
1857 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1858 XFS_AGB_TO_DADDR(mp, agno, agbno),
1859 XFS_FSB_TO_BB(mp, 1));
1860
1861 if (i == btree_curs->num_levels - 1)
1862 btree_curs->root = agbno;
1863
1864 lptr->agbno = agbno;
1865 lptr->prev_agbno = NULLAGBLOCK;
1866 lptr->prev_buf_p = NULL;
1867 /*
1868 * initialize block header
1869 */
1870
1871 lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
1872 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1873 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
1874 libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
1875 i, 0, agno, 0);
1876 }
1877
1878 /*
1879 * run along leaf, setting up records. as we have to switch
1880 * blocks, call the prop_refc_cursor routine to set up the new
1881 * pointers for the parent. that can recurse up to the root
1882 * if required. set the sibling pointers for leaf level here.
1883 */
1884 error = init_refcount_cursor(agno, &refc_cur);
1885 if (error)
1886 do_error(
1887 _("Insufficient memory to construct refcount cursor."));
1888 refc_rec = pop_slab_cursor(refc_cur);
1889 lptr = &btree_curs->level[0];
1890
1891 for (i = 0; i < lptr->num_blocks; i++) {
1892 numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
1893 ASSERT(refc_rec != NULL || numrecs == 0);
1894
1895 /*
1896 * block initialization, lay in block header
1897 */
1898 lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
1899 bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
1900 memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
1901 libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
1902 0, 0, agno, 0);
1903
1904 bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
1905 bt_hdr->bb_numrecs = cpu_to_be16(numrecs);
1906
1907 if (lptr->modulo > 0)
1908 lptr->modulo--;
1909
1910 if (lptr->num_recs_pb > 0)
1911 prop_refc_cursor(mp, agno, btree_curs,
1912 refc_rec->rc_startblock, 0);
1913
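/*
 * leaf records (startblock, blockcount, refcount) are a
 * simple array starting right after the block header.
 */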
1914 bt_rec = (struct xfs_refcount_rec *)
1915 ((char *)bt_hdr + XFS_REFCOUNT_BLOCK_LEN);
1916 for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
1917 ASSERT(refc_rec != NULL);
1918 bt_rec[j].rc_startblock =
1919 cpu_to_be32(refc_rec->rc_startblock);
1920 bt_rec[j].rc_blockcount =
1921 cpu_to_be32(refc_rec->rc_blockcount);
1922 bt_rec[j].rc_refcount = cpu_to_be32(refc_rec->rc_refcount);
1923
1924 refc_rec = pop_slab_cursor(refc_cur);
1925 }
1926
1927 if (refc_rec != NULL) {
1928 /*
1929 * get next leaf level block
1930 */
1931 if (lptr->prev_buf_p != NULL) {
1932 #ifdef XR_BLD_RL_TRACE
1933 fprintf(stderr, "writing refcntbt agbno %u\n",
1934 lptr->prev_agbno);
1935 #endif
1936 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
1937 libxfs_writebuf(lptr->prev_buf_p, 0);
1938 }
1939 lptr->prev_buf_p = lptr->buf_p;
1940 lptr->prev_agbno = lptr->agbno;
1941 lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
1942 bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);
1943
1944 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1945 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
1946 XFS_FSB_TO_BB(mp, 1));
1947 }
1948 }
1949 free_slab_cursor(&refc_cur);
1950 }
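/*
 * For example (hypothetical numbers): with 1000 refcount records
 * and up to 336 records per leaf block, the calculate/init code
 * would give the leaf level 3 blocks holding 334/333/333 records,
 * and a single root block holding the 3 (key, pointer) pairs.
 */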
1951
1952 /*
1953 * build both the agf and the agfl for an agno given both
1954 * btree cursors.
1955 *
1956 * XXX: yet more common code that can be shared with mkfs/growfs.
1957 */
1958 static void
1959 build_agf_agfl(
1960 struct xfs_mount *mp,
1961 xfs_agnumber_t agno,
1962 struct bt_status *bno_bt,
1963 struct bt_status *bcnt_bt,
1964 xfs_extlen_t freeblks, /* # free blocks in tree */
1965 int lostblocks, /* # blocks that will be lost */
1966 struct bt_status *rmap_bt,
1967 struct bt_status *refcnt_bt,
1968 struct xfs_slab *lost_fsb)
1969 {
1970 struct extent_tree_node *ext_ptr;
1971 struct xfs_buf *agf_buf, *agfl_buf;
1972 int i;
1973 struct xfs_agfl *agfl;
1974 struct xfs_agf *agf;
1975 xfs_fsblock_t fsb;
1976 __be32 *freelist;
1977 int error;
1978
1979 agf_buf = libxfs_getbuf(mp->m_dev,
1980 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1981 mp->m_sb.sb_sectsize/BBSIZE);
1982 agf_buf->b_ops = &xfs_agf_buf_ops;
1983 agf = XFS_BUF_TO_AGF(agf_buf);
1984 memset(agf, 0, mp->m_sb.sb_sectsize);
1985
1986 #ifdef XR_BLD_FREE_TRACE
1987 fprintf(stderr, "agf = 0x%p, agf_buf->b_addr = 0x%p\n",
1988 agf, agf_buf->b_addr);
1989 #endif
1990
1991 /*
1992 * set up fixed part of agf
1993 */
1994 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
1995 agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
1996 agf->agf_seqno = cpu_to_be32(agno);
1997
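/*
 * every AG is sb_agblocks long except the last one, which
 * gets whatever is left over of sb_dblocks.
 */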
1998 if (agno < mp->m_sb.sb_agcount - 1)
1999 agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
2000 else
2001 agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
2002 (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
2003
2004 agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
2005 agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
2006 agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
2007 agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
2008 agf->agf_roots[XFS_BTNUM_RMAP] = cpu_to_be32(rmap_bt->root);
2009 agf->agf_levels[XFS_BTNUM_RMAP] = cpu_to_be32(rmap_bt->num_levels);
2010 agf->agf_freeblks = cpu_to_be32(freeblks);
2011 agf->agf_rmap_blocks = cpu_to_be32(rmap_bt->num_tot_blocks -
2012 rmap_bt->num_free_blocks);
2013 agf->agf_refcount_root = cpu_to_be32(refcnt_bt->root);
2014 agf->agf_refcount_level = cpu_to_be32(refcnt_bt->num_levels);
2015 agf->agf_refcount_blocks = cpu_to_be32(refcnt_bt->num_tot_blocks -
2016 refcnt_bt->num_free_blocks);
2017
2018 /*
2019 * Count and record the number of btree blocks consumed if required.
2020 */
2021 if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
2022 unsigned int blks;
2023 /*
2024 * Don't count the root blocks (two for bnobt/cntbt here, one
2025 * more for rmapbt below) as they are already accounted for.
2026 */
2027 blks = (bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
2028 (bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
2029 2;
2030 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2031 blks += rmap_bt->num_tot_blocks - rmap_bt->num_free_blocks - 1;
2032 agf->agf_btreeblks = cpu_to_be32(blks);
2033 #ifdef XR_BLD_FREE_TRACE
2034 fprintf(stderr, "agf->agf_btreeblks = %u\n",
2035 be32_to_cpu(agf->agf_btreeblks));
2036 #endif
2037 }
2038
2039 #ifdef XR_BLD_FREE_TRACE
2040 fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
2041 be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
2042 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
2043 XFS_BTNUM_BNO,
2044 XFS_BTNUM_CNT);
2045 #endif
2046
2047 if (xfs_sb_version_hascrc(&mp->m_sb))
2048 platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
2049
2050 /* initialise the AGFL, then fill it if there are blocks left over. */
2051 agfl_buf = libxfs_getbuf(mp->m_dev,
2052 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
2053 mp->m_sb.sb_sectsize/BBSIZE);
2054 agfl_buf->b_ops = &xfs_agfl_buf_ops;
2055 agfl = XFS_BUF_TO_AGFL(agfl_buf);
2056
2057 /* setting to 0xff results in initialisation to NULLAGBLOCK */
2058 memset(agfl, 0xff, mp->m_sb.sb_sectsize);
2059 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2060 agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
2061 agfl->agfl_seqno = cpu_to_be32(agno);
2062 platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
2063 for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
2064 agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
2065 }
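/*
 * XFS_BUF_TO_AGFL_BNO returns the first free list slot,
 * stepping over the v5 AGFL header when one is present.
 */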
2066 freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);
2067
2068 /*
2069 * do we have left-over blocks in the btree cursors that should
2070 * be used to fill the AGFL?
2071 */
2072 if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
2073 /*
2074 * yes, now grab as many blocks as we can
2075 */
2076 i = 0;
2077 while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
2078 freelist[i] = cpu_to_be32(
2079 get_next_blockaddr(agno, 0, bno_bt));
2080 i++;
2081 }
2082
2083 while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
2084 freelist[i] = cpu_to_be32(
2085 get_next_blockaddr(agno, 0, bcnt_bt));
2086 i++;
2087 }
2088 /*
2089 * now stash the rest of the blocks on the lost-block list for re-freeing after phase 5
2090 */
2091 while (bno_bt->num_free_blocks > 0) {
2092 fsb = XFS_AGB_TO_FSB(mp, agno,
2093 get_next_blockaddr(agno, 0, bno_bt));
2094 error = slab_add(lost_fsb, &fsb);
2095 if (error)
2096 do_error(
2097 _("Insufficient memory saving lost blocks.\n"));
2098 }
2099 while (bcnt_bt->num_free_blocks > 0) {
2100 fsb = XFS_AGB_TO_FSB(mp, agno,
2101 get_next_blockaddr(agno, 0, bcnt_bt));
2102 error = slab_add(lost_fsb, &fsb);
2103 if (error)
2104 do_error(
2105 _("Insufficient memory saving lost blocks.\n"));
2106 }
2107
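/*
 * the AGFL is a circular buffer; slots 0 through i - 1 are
 * now occupied, so set first/last/count to match.
 */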
2108 agf->agf_flfirst = 0;
2109 agf->agf_fllast = cpu_to_be32(i - 1);
2110 agf->agf_flcount = cpu_to_be32(i);
2111 rmap_store_agflcount(mp, agno, i);
2112
2113 #ifdef XR_BLD_FREE_TRACE
2114 fprintf(stderr, "writing agfl for ag %u\n", agno);
2115 #endif
2116
2117 } else {
2118 agf->agf_flfirst = 0;
2119 agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
2120 agf->agf_flcount = 0;
2121 }
2122
2123 libxfs_writebuf(agfl_buf, 0);
2124
2125 ext_ptr = findbiggest_bcnt_extent(agno);
2126 agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
2127 ext_ptr->ex_blockcount : 0);
2128
2129 ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
2130 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
2131 ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
2132 be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]));
2133 ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
2134 be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
2135
2136 libxfs_writebuf(agf_buf, 0);
2137
2138 /*
2139 * now fix up the free list appropriately
2140 */
2141 fix_freelist(mp, agno, true);
2142
2143 #ifdef XR_BLD_FREE_TRACE
2144 fprintf(stderr, "wrote agf for ag %u\n", agno);
2145 #endif
2146 }
2147
2148 /*
2149 * update the superblock counters, sync the sb version numbers and
2150 * feature bits to the filesystem, and sync up the on-disk superblock
2151 * to match the incore superblock.
2152 */
2153 static void
2154 sync_sb(xfs_mount_t *mp)
2155 {
2156 xfs_buf_t *bp;
2157
2158 bp = libxfs_getsb(mp, 0);
2159 if (!bp)
2160 do_error(_("couldn't get superblock\n"));
2161
2162 mp->m_sb.sb_icount = sb_icount;
2163 mp->m_sb.sb_ifree = sb_ifree;
2164 mp->m_sb.sb_fdblocks = sb_fdblocks;
2165 mp->m_sb.sb_frextents = sb_frextents;
2166
2167 update_sb_version(mp);
2168
2169 libxfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
2170 libxfs_writebuf(bp, 0);
2171 }
2172
2173 /*
2174 * make sure the root and realtime inodes show up allocated
2175 * even if they've been freed. they get reinitialized in phase6.
2176 */
2177 static void
2178 keep_fsinos(xfs_mount_t *mp)
2179 {
2180 ino_tree_node_t *irec;
2181 int i;
2182
2183 irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
2184 XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
2185
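/*
 * the realtime bitmap and summary inodes sit in the first
 * inode chunk right behind the root inode, so marking the
 * first three offsets of that chunk in use covers all three.
 */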
2186 for (i = 0; i < 3; i++)
2187 set_inode_used(irec, i);
2188 }
2189
2190 static void
2191 phase5_func(
2192 xfs_mount_t *mp,
2193 xfs_agnumber_t agno,
2194 struct xfs_slab *lost_fsb)
2195 {
2196 uint64_t num_inos;
2197 uint64_t num_free_inos;
2198 uint64_t finobt_num_inos;
2199 uint64_t finobt_num_free_inos;
2200 bt_status_t bno_btree_curs;
2201 bt_status_t bcnt_btree_curs;
2202 bt_status_t ino_btree_curs;
2203 bt_status_t fino_btree_curs;
2204 bt_status_t rmap_btree_curs;
2205 bt_status_t refcnt_btree_curs;
2206 int extra_blocks = 0;
2207 uint num_freeblocks;
2208 xfs_extlen_t freeblks1;
2209 #ifdef DEBUG
2210 xfs_extlen_t freeblks2;
2211 #endif
2212 xfs_agblock_t num_extents;
2213 struct agi_stat agi_stat = {0,};
2214 int error;
2215
2216 if (verbose)
2217 do_log(_(" - agno = %d\n"), agno);
2218
2219 {
2220 /*
2221 * build up incore bno and bcnt extent btrees
2222 */
2223 num_extents = mk_incore_fstree(mp, agno);
2224
2225 #ifdef XR_BLD_FREE_TRACE
2226 fprintf(stderr, "# of bno extents is %d\n",
2227 count_bno_extents(agno));
2228 #endif
2229
2230 if (num_extents == 0) {
2231 /*
2232 * XXX - what we probably should do here is pick an
2233 * inode for a regular file in the allocation group
2234 * that has space allocated and shoot it by traversing
2235 * the bmap list and putting all its extents on the
2236 * incore freespace trees, clearing the inode,
2237 * and clearing the in-use bit in the incore inode
2238 * tree. Then try mk_incore_fstree() again.
2239 */
2240 do_error(_("unable to rebuild AG %u. "
2241 "Not enough free space in on-disk AG.\n"),
2242 agno);
2243 }
2244
2245 /*
2246 * ok, now set up the btree cursors for the
2247 * on-disk btrees (includes pre-allocating all
2248 * required blocks for the trees themselves)
2249 */
2250 init_ino_cursor(mp, agno, &ino_btree_curs, &num_inos,
2251 &num_free_inos, 0);
2252
2253 if (xfs_sb_version_hasfinobt(&mp->m_sb))
2254 init_ino_cursor(mp, agno, &fino_btree_curs,
2255 &finobt_num_inos, &finobt_num_free_inos,
2256 1);
2257
2258 sb_icount_ag[agno] += num_inos;
2259 sb_ifree_ag[agno] += num_free_inos;
2260
2261 /*
2262 * Set up the btree cursors for the on-disk rmap btrees,
2263 * which includes pre-allocating all required blocks.
2264 */
2265 init_rmapbt_cursor(mp, agno, &rmap_btree_curs);
2266
2267 /*
2268 * Set up the btree cursors for the on-disk refcount btrees,
2269 * which includes pre-allocating all required blocks.
2270 */
2271 init_refc_cursor(mp, agno, &refcnt_btree_curs);
2272
2273 num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
2274 /*
2275 * lose two blocks per AG -- the space tree roots
2276 * are counted as allocated since the space trees
2277 * always have roots
2278 */
2279 sb_fdblocks_ag[agno] += num_freeblocks - 2;
2280
2281 if (num_extents == 0) {
2282 /*
2283 * XXX - what we probably should do here is pick an
2284 * inode for a regular file in the allocation group
2285 * that has space allocated and shoot it by traversing
2286 * the bmap list and putting all its extents on the
2287 * incore freespace trees, clearing the inode,
2288 * and clearing the in-use bit in the incore inode
2289 * tree. Then try mk_incore_fstree() again.
2290 */
2291 do_error(
2292 _("unable to rebuild AG %u. No free space.\n"), agno);
2293 }
2294
2295 #ifdef XR_BLD_FREE_TRACE
2296 fprintf(stderr, "# of bno extents is %d\n", num_extents);
2297 #endif
2298
2299 /*
2300 * track blocks that we might really lose
2301 */
2302 extra_blocks = calculate_freespace_cursor(mp, agno,
2303 &num_extents, &bno_btree_curs);
2304
2305 /*
2306 * the freespace btree blocks themselves live in the "free space",
2307 * but the filesystem treats AGFL blocks as allocated
2308 * since they aren't described by the freespace trees
2309 */
2310
2311 /*
2312 * see if we can fit all the extra blocks into the AGFL
2313 */
2314 extra_blocks = (extra_blocks - XFS_AGFL_SIZE(mp) > 0)
2315 ? extra_blocks - XFS_AGFL_SIZE(mp)
2316 : 0;
2317
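/*
 * anything that still didn't fit in the AGFL really is lost,
 * so drop it from the running free block count.
 */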
2318 if (extra_blocks > 0)
2319 sb_fdblocks_ag[agno] -= extra_blocks;
2320
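/*
 * the by-count tree indexes the same extents as the by-block
 * tree and so has the same geometry; start its cursor as a
 * copy of the bno cursor.
 */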
2321 bcnt_btree_curs = bno_btree_curs;
2322
2323 bno_btree_curs.owner = XFS_RMAP_OWN_AG;
2324 bcnt_btree_curs.owner = XFS_RMAP_OWN_AG;
2325 setup_cursor(mp, agno, &bno_btree_curs);
2326 setup_cursor(mp, agno, &bcnt_btree_curs);
2327
2328 #ifdef XR_BLD_FREE_TRACE
2329 fprintf(stderr, "# of bno extents is %d\n",
2330 count_bno_extents(agno));
2331 fprintf(stderr, "# of bcnt extents is %d\n",
2332 count_bcnt_extents(agno));
2333 #endif
2334
2335 /*
2336 * now rebuild the freespace trees
2337 */
2338 freeblks1 = build_freespace_tree(mp, agno,
2339 &bno_btree_curs, XFS_BTNUM_BNO);
2340 #ifdef XR_BLD_FREE_TRACE
2341 fprintf(stderr, "# of free blocks == %d\n", freeblks1);
2342 #endif
2343 write_cursor(&bno_btree_curs);
2344
2345 #ifdef DEBUG
2346 freeblks2 = build_freespace_tree(mp, agno,
2347 &bcnt_btree_curs, XFS_BTNUM_CNT);
2348 #else
2349 (void) build_freespace_tree(mp, agno,
2350 &bcnt_btree_curs, XFS_BTNUM_CNT);
2351 #endif
2352 write_cursor(&bcnt_btree_curs);
2353
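/*
 * freeblks2 only exists in DEBUG builds; ASSERT should compile
 * away otherwise, so the reference below is debug-only.
 */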
2354 ASSERT(freeblks1 == freeblks2);
2355
2356 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
2357 build_rmap_tree(mp, agno, &rmap_btree_curs);
2358 write_cursor(&rmap_btree_curs);
2359 sb_fdblocks_ag[agno] += (rmap_btree_curs.num_tot_blocks -
2360 rmap_btree_curs.num_free_blocks) - 1;
2361 }
2362
2363 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2364 build_refcount_tree(mp, agno, &refcnt_btree_curs);
2365 write_cursor(&refcnt_btree_curs);
2366 }
2367
2368 /*
2369 * set up agf and agfl
2370 */
2371 build_agf_agfl(mp, agno, &bno_btree_curs,
2372 &bcnt_btree_curs, freeblks1, extra_blocks,
2373 &rmap_btree_curs, &refcnt_btree_curs, lost_fsb);
2374 /*
2375 * build inode allocation tree.
2376 */
2377 build_ino_tree(mp, agno, &ino_btree_curs, XFS_BTNUM_INO,
2378 &agi_stat);
2379 write_cursor(&ino_btree_curs);
2380
2381 /*
2382 * build free inode tree
2383 */
2384 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2385 build_ino_tree(mp, agno, &fino_btree_curs,
2386 XFS_BTNUM_FINO, NULL);
2387 write_cursor(&fino_btree_curs);
2388 }
2389
2390 /* build the agi */
2391 build_agi(mp, agno, &ino_btree_curs, &fino_btree_curs,
2392 &agi_stat);
2393
2394 /*
2395 * tear down cursors
2396 */
2397 finish_cursor(&bno_btree_curs);
2398 finish_cursor(&ino_btree_curs);
2399 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2400 finish_cursor(&rmap_btree_curs);
2401 if (xfs_sb_version_hasreflink(&mp->m_sb))
2402 finish_cursor(&refcnt_btree_curs);
2403 if (xfs_sb_version_hasfinobt(&mp->m_sb))
2404 finish_cursor(&fino_btree_curs);
2405 finish_cursor(&bcnt_btree_curs);
2406
2407 /*
2408 * Put the per-AG btree rmap data into the rmapbt
2409 */
2410 error = rmap_store_ag_btree_rec(mp, agno);
2411 if (error)
2412 do_error(
2413 _("unable to add AG %u reverse-mapping data to btree.\n"), agno);
2414
2415 /*
2416 * release the incore per-AG bno/bcnt trees so
2417 * the extent nodes can be recycled
2418 */
2419 release_agbno_extent_tree(agno);
2420 release_agbcnt_extent_tree(agno);
2421 }
2422 PROG_RPT_INC(prog_rpt_done[agno], 1);
2423 }
2424
2425 /* Inject lost blocks back into the filesystem. */
2426 static int
2427 inject_lost_blocks(
2428 struct xfs_mount *mp,
2429 struct xfs_slab *lost_fsbs)
2430 {
2431 struct xfs_trans *tp = NULL;
2432 struct xfs_slab_cursor *cur = NULL;
2433 xfs_fsblock_t *fsb;
2434 struct xfs_trans_res tres = {0};
2435 struct xfs_owner_info oinfo;
2436 int error;
2437
2438 libxfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
2439 error = init_slab_cursor(lost_fsbs, NULL, &cur);
2440 if (error)
2441 return error;
2442
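/*
 * free each lost block in its own transaction; a single
 * extent free needs only a minimal reservation.
 */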
2443 while ((fsb = pop_slab_cursor(cur)) != NULL) {
2444 error = -libxfs_trans_alloc(mp, &tres, 16, 0, 0, &tp);
2445 if (error)
2446 goto out_cancel;
2447
2448 error = -libxfs_free_extent(tp, *fsb, 1, &oinfo,
2449 XFS_AG_RESV_NONE);
2450 if (error)
2451 goto out_cancel;
2452
2453 error = -libxfs_trans_commit(tp);
2454 if (error)
2455 goto out_cancel;
2456 tp = NULL;
2457 }
2458
2459 out_cancel:
2460 if (tp)
2461 libxfs_trans_cancel(tp);
2462 free_slab_cursor(&cur);
2463 return error;
2464 }
2465
2466 void
2467 phase5(xfs_mount_t *mp)
2468 {
2469 struct xfs_slab *lost_fsb;
2470 xfs_agnumber_t agno;
2471 int error;
2472
2473 do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
2474 set_progress_msg(PROG_FMT_REBUILD_AG, (uint64_t)glob_agcount);
2475
2476 #ifdef XR_BLD_FREE_TRACE
2477 fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
2478 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
2479 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
2480 fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
2481 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
2482 libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
2483 fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
2484 XR_INOBT_BLOCK_MAXRECS(mp, 0));
2485 fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
2486 XR_INOBT_BLOCK_MAXRECS(mp, 1));
2487 fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
2488 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
2489 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
2490 fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
2491 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
2492 libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
2493 #endif
2494 /*
2495 * make sure the root and realtime inodes show up allocated
2496 */
2497 keep_fsinos(mp);
2498
2499 /* allocate per ag counters */
2500 sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2501 if (sb_icount_ag == NULL)
2502 do_error(_("cannot alloc sb_icount_ag buffers\n"));
2503
2504 sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2505 if (sb_ifree_ag == NULL)
2506 do_error(_("cannot alloc sb_ifree_ag buffers\n"));
2507
2508 sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
2509 if (sb_fdblocks_ag == NULL)
2510 do_error(_("cannot alloc sb_fdblocks_ag buffers\n"));
2511
2512 error = init_slab(&lost_fsb, sizeof(xfs_fsblock_t));
2513 if (error)
2514 do_error(_("cannot alloc lost block slab\n"));
2515
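/*
 * rebuild each AG in turn; phase5_func accumulates the per-AG
 * superblock counters and the list of lost blocks as it goes.
 */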
2516 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++)
2517 phase5_func(mp, agno, lost_fsb);
2518
2519 print_final_rpt();
2520
2521 /* aggregate per ag counters */
2522 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2523 sb_icount += sb_icount_ag[agno];
2524 sb_ifree += sb_ifree_ag[agno];
2525 sb_fdblocks += sb_fdblocks_ag[agno];
2526 }
2527 free(sb_icount_ag);
2528 free(sb_ifree_ag);
2529 free(sb_fdblocks_ag);
2530
2531 if (mp->m_sb.sb_rblocks) {
2532 do_log(
2533 _(" - generate realtime summary info and bitmap...\n"));
2534 rtinit(mp);
2535 generate_rtinfo(mp, btmcompute, sumcompute);
2536 }
2537
2538 do_log(_(" - reset superblock...\n"));
2539
2540 /*
2541 * sync superblock counter and set version bits correctly
2542 */
2543 sync_sb(mp);
2544
2545 error = inject_lost_blocks(mp, lost_fsb);
2546 if (error)
2547 do_error(_("Unable to reinsert lost blocks into filesystem.\n"));
2548 free_slab(&lost_fsb);
2549
2550 bad_ino_btree = 0;
2551
2552 }