repair/phase5.c
1 /*
2 * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33 #include <libxfs.h>
34 #include "avl.h"
35 #include "globals.h"
36 #include "agheader.h"
37 #include "incore.h"
38 #include "protos.h"
39 #include "err_protos.h"
40 #include "dinode.h"
41 #include "rt.h"
42 #include "versions.h"
43
44 /*
45 * we maintain the current slice (path from root to leaf)
46 * of the btree incore. when we need a new block, we ask
47 * the block allocator for the address of a block on that
48 * level, map the block in, and set up the appropriate
49 * pointers (child, sibling, etc.) and keys that should
50 * point to the new block.
51 */
52 typedef struct bt_stat_level {
53 /*
54 * set in setup_cursor routine and maintained in the tree-building
55 * routines
56 */
57 xfs_buf_t *buf_p; /* 2 buffer pointers to ... */
58 xfs_buf_t *prev_buf_p;
59 xfs_agblock_t agbno; /* current block being filled */
60 xfs_agblock_t prev_agbno; /* previous block */
61 /*
62 * set in calculate/init cursor routines for each btree level
63 */
64 int num_recs_tot; /* # tree recs in level */
65 int num_blocks; /* # tree blocks in level */
66 int num_recs_pb; /* num_recs_tot / num_blocks */
67 int modulo; /* num_recs_tot % num_blocks */
68 } bt_stat_level_t;
69
70 typedef struct bt_status {
71 int init; /* cursor set up once? */
72 int num_levels; /* # of levels in btree */
73 xfs_extlen_t num_tot_blocks; /* # blocks alloc'ed for tree */
74 xfs_extlen_t num_free_blocks;/* # blocks currently unused */
75
76 xfs_agblock_t root; /* root block */
77 /*
78 * list of blocks to be used to set up this tree
79 * and pointer to the first unused block on the list
80 */
81 xfs_agblock_t *btree_blocks; /* block list */
82 xfs_agblock_t *free_btree_blocks; /* first unused block */
83 /*
84 * per-level status info
85 */
86 bt_stat_level_t level[XFS_BTREE_MAXLEVELS];
87 } bt_status_t;
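/*
 * note on how the per-level counts are used by the tree builders below:
 * records are spread as evenly as possible across a level -- every block
 * gets num_recs_pb records and the first "modulo" blocks get one extra
 * (bb_numrecs is set to num_recs_pb + (modulo > 0) and modulo is
 * decremented as each of those blocks is filled).
 */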
88
89
90 int
91 mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
92 {
93 int in_extent;
94 int num_extents;
95 xfs_agblock_t extent_start;
96 xfs_extlen_t extent_len;
97 xfs_agblock_t agbno;
98 xfs_agblock_t ag_end;
99 uint free_blocks;
100 #ifdef XR_BLD_FREE_TRACE
101 int old_state;
102 int state = XR_E_BAD_STATE;
103 #endif
104
105 /*
106 * scan the bitmap for the ag looking for contiguous
107 * extents of free blocks. At this point, we know
108 * that blocks in the bitmap are either set to an
109 * "in use" state or set to unknown (0) since the
110 * bmaps were bzero'ed in phase 4 and only blocks
111 * being used by inodes, inode bmaps, ag headers,
112 * and the files themselves were put into the bitmap.
113 *
114 */
115 ASSERT(agno < mp->m_sb.sb_agcount);
116
117 extent_start = extent_len = 0;
118 in_extent = 0;
119 num_extents = free_blocks = 0;
120
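/*
 * every ag is sb_agblocks long except the last one, whose size has to
 * be computed from sb_dblocks.
 */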
121 if (agno < mp->m_sb.sb_agcount - 1)
122 ag_end = mp->m_sb.sb_agblocks;
123 else
124 ag_end = mp->m_sb.sb_dblocks -
125 mp->m_sb.sb_agblocks * (mp->m_sb.sb_agcount - 1);
126
127 /*
128 * ok, now find the number of extents, keep track of the
129 * largest extent.
130 */
131 for (agbno = 0; agbno < ag_end; agbno++) {
132 #if 0
133 old_state = state;
134 state = get_agbno_state(mp, agno, agbno);
135 if (state != old_state) {
136 fprintf(stderr, "agbno %u - new state is %d\n",
137 agbno, state);
138 }
139 #endif
140 if (get_agbno_state(mp, agno, agbno) < XR_E_INUSE) {
141 free_blocks++;
142 if (in_extent == 0) {
143 /*
144 * found the start of a free extent
145 */
146 in_extent = 1;
147 num_extents++;
148 extent_start = agbno;
149 extent_len = 1;
150 } else {
151 extent_len++;
152 }
153 } else {
154 if (in_extent) {
155 /*
156 * free extent ends here, add extent to the
157 * 2 incore extent (avl-to-be-B+) trees
158 */
159 in_extent = 0;
160 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
161 fprintf(stderr, "adding extent %u [%u %u]\n",
162 agno, extent_start, extent_len);
163 #endif
164 add_bno_extent(agno, extent_start, extent_len);
165 add_bcnt_extent(agno, extent_start, extent_len);
166 }
167 }
168 }
169 if (in_extent) {
170 /*
171 * free extent ends here
172 */
173 in_extent = 0;
174 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
175 fprintf(stderr, "adding extent %u [%u %u]\n",
176 agno, extent_start, extent_len);
177 #endif
178 add_bno_extent(agno, extent_start, extent_len);
179 add_bcnt_extent(agno, extent_start, extent_len);
180 }
181
182 return(num_extents);
183 }
184
185 /* ARGSUSED */
186 xfs_agblock_t
187 get_next_blockaddr(xfs_agnumber_t agno, int level, bt_status_t *curs)
188 {
189 ASSERT(curs->free_btree_blocks < curs->btree_blocks +
190 curs->num_tot_blocks);
191 ASSERT(curs->num_free_blocks > 0);
192
193 curs->num_free_blocks--;
194 return(*curs->free_btree_blocks++);
195 }
196
197 /*
198 * set up the dynamically allocated block allocation data in the btree
199 * cursor that depends on the info in the static portion of the cursor.
200 * allocates space from the incore bno/bcnt extent trees and sets up
201 * the first path up the left side of the tree. Also sets up the
202 * cursor pointer to the btree root. called by init_freespace_cursor()
203 * and init_ino_cursor()
204 */
205 /* ARGSUSED */
206 void
207 setup_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *curs)
208 {
209 int j;
210 unsigned int u;
211 xfs_extlen_t big_extent_len;
212 xfs_agblock_t big_extent_start;
213 extent_tree_node_t *ext_ptr;
214 extent_tree_node_t *bno_ext_ptr;
215 xfs_extlen_t blocks_allocated;
216 xfs_agblock_t *agb_ptr;
217
218 /*
219 * get the number of blocks we need to allocate, then
220 * set up block number array, set the free block pointer
221 * to the first block in the array, and null the array
222 */
223 big_extent_len = curs->num_tot_blocks;
224 blocks_allocated = 0;
225
226 ASSERT(big_extent_len > 0);
227
228 if ((curs->btree_blocks = malloc(sizeof(xfs_agblock_t)
229 * big_extent_len)) == NULL) {
230 do_error("could not set up btree block array\n");
231 exit(1);
232 }
233
234 agb_ptr = curs->free_btree_blocks = curs->btree_blocks;
235
236 for (j = 0; j < curs->num_free_blocks; j++, agb_ptr++)
237 *agb_ptr = NULLAGBLOCK;
238
239 /*
240 * grab the smallest extent and use it up, then get the
241 * next smallest. This mimics the init_*_cursor code.
242 */
243 if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL) {
244 do_error("error - not enough free space in filesystem\n");
245 exit(1);
246 }
247
248 agb_ptr = curs->btree_blocks;
249 j = curs->level[0].num_blocks;
250
251 /*
252 * set up the free block array
253 */
254 while (blocks_allocated < big_extent_len) {
255 /*
256 * use up the extent we've got
257 */
258 for (u = 0; u < ext_ptr->ex_blockcount &&
259 blocks_allocated < big_extent_len; u++) {
260 ASSERT(agb_ptr < curs->btree_blocks
261 + curs->num_tot_blocks);
262 *agb_ptr++ = ext_ptr->ex_startblock + u;
263 blocks_allocated++;
264 }
265
266 /*
267 * if we only used part of this last extent, then we
268 * need only to reset the extent in the extent
269 * trees and we're done
270 */
271 if (u < ext_ptr->ex_blockcount) {
272 big_extent_start = ext_ptr->ex_startblock + u;
273 big_extent_len = ext_ptr->ex_blockcount - u;
274
275 ASSERT(big_extent_len > 0);
276
277 bno_ext_ptr = find_bno_extent(agno,
278 ext_ptr->ex_startblock);
279 ASSERT(bno_ext_ptr != NULL);
280 get_bno_extent(agno, bno_ext_ptr);
281 release_extent_tree_node(bno_ext_ptr);
282
283 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
284 ext_ptr->ex_blockcount);
285 release_extent_tree_node(ext_ptr);
286 #ifdef XR_BLD_FREE_TRACE
287 fprintf(stderr, "releasing extent: %u [%u %u]\n",
288 agno, ext_ptr->ex_startblock,
289 ext_ptr->ex_blockcount);
290 fprintf(stderr, "blocks_allocated = %d\n",
291 blocks_allocated);
292 #endif
293
294 add_bno_extent(agno, big_extent_start, big_extent_len);
295 add_bcnt_extent(agno, big_extent_start, big_extent_len);
296
297 return;
298 }
299 /*
300 * delete the used-up extent from both extent trees and
301 * find next biggest extent
302 */
303 #ifdef XR_BLD_FREE_TRACE
304 fprintf(stderr, "releasing extent: %u [%u %u]\n",
305 agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
306 #endif
307 bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
308 ASSERT(bno_ext_ptr != NULL);
309 get_bno_extent(agno, bno_ext_ptr);
310 release_extent_tree_node(bno_ext_ptr);
311
312 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
313 ext_ptr->ex_blockcount);
314 ASSERT(ext_ptr != NULL);
315 release_extent_tree_node(ext_ptr);
316
317 ext_ptr = findfirst_bcnt_extent(agno);
318 }
319 #ifdef XR_BLD_FREE_TRACE
320 fprintf(stderr, "blocks_allocated = %d\n",
321 blocks_allocated);
322 #endif
323 }
324
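/*
 * flush the block currently being filled at each level of the btree,
 * plus the previously filled block at that level if it is still
 * buffered.  finish_cursor() below just releases the cursor's block
 * list once every pre-allocated block has been handed out.
 */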
325 void
326 write_cursor(bt_status_t *curs)
327 {
328 int i;
329
330 for (i = 0; i < curs->num_levels; i++) {
331 #if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
332 fprintf(stderr, "writing bt block %u\n", curs->level[i].agbno);
333 #endif
334 if (curs->level[i].prev_buf_p != NULL) {
335 ASSERT(curs->level[i].prev_agbno != NULLAGBLOCK);
336 libxfs_writebuf(curs->level[i].prev_buf_p, 0);
337 }
338 libxfs_writebuf(curs->level[i].buf_p, 0);
339 }
340 }
341
342 void
343 finish_cursor(bt_status_t *curs)
344 {
345 ASSERT(curs->num_free_blocks == 0);
346 free(curs->btree_blocks);
347 }
348
349 /*
350 * no-cursor versions of the XFS equivalents. The address calculators
351 * should be used only for interior btree nodes.
352 * these are adapted from xfs_alloc_btree.h and xfs_btree.h
353 */
354 #define XR_ALLOC_KEY_ADDR(mp, bp, i) \
355 (xfs_alloc_key_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
356 + ((i)-1) * sizeof(xfs_alloc_key_t))
357
358 #define XR_ALLOC_PTR_ADDR(mp, bp, i) \
359 (xfs_alloc_ptr_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
360 + (mp)->m_alloc_mxr[1] * sizeof(xfs_alloc_key_t) \
361 + ((i)-1) * sizeof(xfs_alloc_ptr_t))
362
363 #define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
364 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
365 xfs_alloc, (level) == 0)
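
/*
 * rough example of the geometry these macros give (assuming the usual
 * 16-byte short-form btree block header and 4k filesystem blocks):
 * a leaf holds (4096 - 16) / sizeof(xfs_alloc_rec_t) = 510 records,
 * an interior node holds (4096 - 16) / (key + ptr) = 340 entries.
 */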
366
367 /*
368 * this calculates a freespace cursor for an ag.
369 * btree_curs is an in/out. returns the number of
370 * blocks that will show up in the AGFL.
371 */
372
373 int
374 calculate_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
375 xfs_agblock_t *extents, bt_status_t *btree_curs)
376 {
377 xfs_extlen_t blocks_needed; /* a running count */
378 xfs_extlen_t blocks_allocated_pt; /* per tree */
379 xfs_extlen_t blocks_allocated_total; /* for both trees */
380 xfs_agblock_t num_extents;
381 int i;
382 int extents_used;
383 int extra_blocks;
384 bt_stat_level_t *lptr;
385 bt_stat_level_t *p_lptr;
386 extent_tree_node_t *ext_ptr;
387 int level;
388 #ifdef XR_BLD_FREE_TRACE
389 int old_state;
390 int state = XR_E_BAD_STATE;
391 #endif
392 #ifdef XR_BLD_FREE_TRACE
393 fprintf(stderr,
394 "in init_freespace_cursor, agno = %d\n", agno);
395 #endif
396
397 num_extents = *extents;
398 extents_used = 0;
399
400 ASSERT(num_extents != 0);
401
402 lptr = &btree_curs->level[0];
403 btree_curs->init = 1;
404
405 /*
406 * figure out how much space we need for the leaf level
407 * of the tree and set up the cursor for the leaf level
408 * (note that the same code is duplicated further down)
409 */
410 lptr->num_blocks = howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0));
411 lptr->num_recs_pb = num_extents / lptr->num_blocks;
412 lptr->modulo = num_extents % lptr->num_blocks;
413 lptr->num_recs_tot = num_extents;
414 level = 1;
415
416 /*
417 * if we need more levels, set them up. # of records
418 * per level is the # of blocks in the level below it
419 */
420 if (lptr->num_blocks > 1) {
421 for (; btree_curs->level[level - 1].num_blocks > 1
422 && level < XFS_BTREE_MAXLEVELS;
423 level++) {
424 lptr = &btree_curs->level[level];
425 p_lptr = &btree_curs->level[level - 1];
426 lptr->num_blocks = howmany(p_lptr->num_blocks,
427 XR_ALLOC_BLOCK_MAXRECS(mp, level));
428 lptr->modulo = p_lptr->num_blocks
429 % lptr->num_blocks;
430 lptr->num_recs_pb = p_lptr->num_blocks
431 / lptr->num_blocks;
432 lptr->num_recs_tot = p_lptr->num_blocks;
433 }
434 }
435
436 ASSERT(lptr->num_blocks == 1);
437 btree_curs->num_levels = level;
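
/*
 * worked example of the geometry just computed (assuming 4k blocks,
 * i.e. roughly 510 leaf records and 340 node entries per block):
 * 1000 extents need howmany(1000, 510) = 2 leaf blocks holding 500
 * records each (modulo 0), plus howmany(2, 340) = 1 root block --
 * 3 blocks per tree, 6 blocks total for the bno and bcnt trees.
 */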
438
439 /*
440 * ok, now we have a hypothetical cursor that
441 * will work for both the bno and bcnt trees.
442 * now figure out if using up blocks to set up the
443 * trees will perturb the shape of the freespace tree.
444 * if so, we've over-allocated. the freespace trees
445 * as they will be *after* accounting for the free space
446 * we've used up will need fewer blocks to represent
447 * than we've allocated. We can use the AGFL to hold
448 * XFS_AGFL_SIZE (128) blocks but that's it.
449 * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
450 * if the number of extra blocks is more than that,
451 * we'll have to be called again.
452 */
453 for (blocks_needed = 0, i = 0; i < level; i++) {
454 blocks_needed += btree_curs->level[i].num_blocks;
455 }
456
457 /*
458 * record the # of blocks we've allocated
459 */
460 blocks_allocated_pt = blocks_needed;
461 blocks_needed *= 2;
462 blocks_allocated_total = blocks_needed;
463
464 /*
465 * figure out how many free extents will be used up by
466 * our space allocation
467 */
468 if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL) {
469 do_error("can't rebuild fs trees -- not enough free space "
470 "on ag %u\n", agno);
471 exit(1);
472 }
473
474 i = 0;
475 while (ext_ptr != NULL && blocks_needed > 0) {
476 if (ext_ptr->ex_blockcount <= blocks_needed) {
477 blocks_needed -= ext_ptr->ex_blockcount;
478 extents_used++;
479 } else {
480 blocks_needed = 0;
481 }
482
483 ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
484
485 #ifdef XR_BLD_FREE_TRACE
486 if (ext_ptr != NULL) {
487 fprintf(stderr, "got next extent [%u %u]\n",
488 ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
489 } else {
490 fprintf(stderr, "out of extents\n");
491 }
492 #endif
493 }
494 if (blocks_needed > 0) {
495 do_error("ag %u - not enough free space to build freespace "
496 "btrees\n", agno);
497 exit(1);
498 }
499
500 ASSERT(num_extents >= extents_used);
501
502 num_extents -= extents_used;
503
504 /*
505 * see if the number of leaf blocks will change as a result
506 * of the number of extents changing
507 */
508 if (howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0))
509 != btree_curs->level[0].num_blocks) {
510 /*
511 * yes -- recalculate the cursor. If the number of
512 * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
513 * we can put those into the AGFL. we don't try
514 * and get things to converge exactly (reach a
515 * state with zero excess blocks) because there
516 * exist pathological cases which will never
517 * converge. first, check for the zero-case.
518 */
519 if (num_extents == 0) {
520 /*
521 * ok, we've used up all the free blocks
522 * trying to lay out the leaf level. go
523 * to a one block (empty) btree and put the
524 * already allocated blocks into the AGFL
525 */
526 if (btree_curs->level[0].num_blocks != 1) {
527 /*
528 * we really needed more blocks because
529 * the old tree had more than one level.
530 * this is bad.
531 */
532 do_warn("not enough free blocks left to "
533 "describe all free blocks in AG %u\n",
534 agno);
535 }
536 #ifdef XR_BLD_FREE_TRACE
537 fprintf(stderr,
538 "ag %u -- no free extents, alloc'ed %d\n",
539 agno, blocks_allocated_pt);
540 #endif
541 lptr->num_blocks = 1;
542 lptr->modulo = 0;
543 lptr->num_recs_pb = 0;
544 lptr->num_recs_tot = 0;
545
546 btree_curs->num_levels = 1;
547
548 /*
549 * don't reset the allocation stats, assume
550 * they're all extra blocks
551 * don't forget to return the total block count
552 * not the per-tree block count. these are the
553 * extras that will go into the AGFL. subtract
554 * two for the root blocks.
555 */
556 btree_curs->num_tot_blocks = blocks_allocated_pt;
557 btree_curs->num_free_blocks = blocks_allocated_pt;
558
559 *extents = 0;
560
561 return(blocks_allocated_total - 2);
562 }
563
564 lptr = &btree_curs->level[0];
565 lptr->num_blocks = howmany(num_extents,
566 XR_ALLOC_BLOCK_MAXRECS(mp, 0));
567 lptr->num_recs_pb = num_extents / lptr->num_blocks;
568 lptr->modulo = num_extents % lptr->num_blocks;
569 lptr->num_recs_tot = num_extents;
570 level = 1;
571
572 /*
573 * if we need more levels, set them up
574 */
575 if (lptr->num_blocks > 1) {
576 for (level = 1; btree_curs->level[level-1].num_blocks
577 > 1 && level < XFS_BTREE_MAXLEVELS;
578 level++) {
579 lptr = &btree_curs->level[level];
580 p_lptr = &btree_curs->level[level-1];
581 lptr->num_blocks = howmany(p_lptr->num_blocks,
582 XR_ALLOC_BLOCK_MAXRECS(mp,
583 level));
584 lptr->modulo = p_lptr->num_blocks
585 % lptr->num_blocks;
586 lptr->num_recs_pb = p_lptr->num_blocks
587 / lptr->num_blocks;
588 lptr->num_recs_tot = p_lptr->num_blocks;
589 }
590 }
591 ASSERT(lptr->num_blocks == 1);
592 btree_curs->num_levels = level;
593
594 /*
595 * now figure out the number of excess blocks
596 */
597 for (blocks_needed = 0, i = 0; i < level; i++) {
598 blocks_needed += btree_curs->level[i].num_blocks;
599 }
600 blocks_needed *= 2;
601
602 ASSERT(blocks_allocated_total >= blocks_needed);
603 extra_blocks = blocks_allocated_total - blocks_needed;
604 } else {
605 if (extents_used > 0) {
606 /*
607 * reset the leaf level geometry to account
608 * for consumed extents. we can leave the
609 * rest of the cursor alone since the number
610 * of leaf blocks hasn't changed.
611 */
612 lptr = &btree_curs->level[0];
613
614 lptr->num_recs_pb = num_extents / lptr->num_blocks;
615 lptr->modulo = num_extents % lptr->num_blocks;
616 lptr->num_recs_tot = num_extents;
617 }
618
619 extra_blocks = 0;
620 }
621
622 btree_curs->num_tot_blocks = blocks_allocated_pt;
623 btree_curs->num_free_blocks = blocks_allocated_pt;
624
625 *extents = num_extents;
626
627 return(extra_blocks);
628 }
629
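/*
 * propagate the key for the first extent of a newly started child
 * block up into the parent node at the next level, starting a fresh
 * parent block (and recursing toward the root) whenever the current
 * parent has filled up.
 */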
630 void
631 prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
632 bt_status_t *btree_curs, xfs_agblock_t startblock,
633 xfs_extlen_t blockcount, int level, __uint32_t magic)
634 {
635 xfs_alloc_block_t *bt_hdr;
636 xfs_alloc_key_t *bt_key;
637 xfs_alloc_ptr_t *bt_ptr;
638 xfs_agblock_t agbno;
639 bt_stat_level_t *lptr;
640
641 level++;
642
643 if (level >= btree_curs->num_levels)
644 return;
645
646 lptr = &btree_curs->level[level];
647 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
648
649 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) == 0) {
650 /*
651 * only happens once when initializing the
652 * left-hand side of the tree.
653 */
654 prop_freespace_cursor(mp, agno, btree_curs, startblock,
655 blockcount, level, magic);
656 }
657
658 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) ==
659 lptr->num_recs_pb + (lptr->modulo > 0)) {
660 /*
661 * write out current prev block, grab us a new block,
662 * and set the rightsib pointer of current block
663 */
664 #ifdef XR_BLD_FREE_TRACE
665 fprintf(stderr, " %d ", lptr->prev_agbno);
666 #endif
667 if (lptr->prev_agbno != NULLAGBLOCK) {
668 ASSERT(lptr->prev_buf_p != NULL);
669 libxfs_writebuf(lptr->prev_buf_p, 0);
670 }
671 lptr->prev_agbno = lptr->agbno;
672 lptr->prev_buf_p = lptr->buf_p;
673 agbno = get_next_blockaddr(agno, level, btree_curs);
674
675 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, agbno);
676
677 lptr->buf_p = libxfs_getbuf(mp->m_dev,
678 XFS_AGB_TO_DADDR(mp, agno, agbno),
679 XFS_FSB_TO_BB(mp, 1));
680 lptr->agbno = agbno;
681
682 if (lptr->modulo)
683 lptr->modulo--;
684
685 /*
686 * initialize block header
687 */
688 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
689 bzero(bt_hdr, mp->m_sb.sb_blocksize);
690
691 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
692 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, level);
693 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
694 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
695 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
696
697 /*
698 * propagate extent record for first extent in new block up
699 */
700 prop_freespace_cursor(mp, agno, btree_curs, startblock,
701 blockcount, level, magic);
702 }
703 /*
704 * add extent info to current block
705 */
706 INT_MOD(bt_hdr->bb_numrecs, ARCH_CONVERT, +1);
707
708 bt_key = XR_ALLOC_KEY_ADDR(mp, bt_hdr,
709 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
710 bt_ptr = XR_ALLOC_PTR_ADDR(mp, bt_hdr,
711 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
712
713 INT_SET(bt_key->ar_startblock, ARCH_CONVERT, startblock);
714 INT_SET(bt_key->ar_blockcount, ARCH_CONVERT, blockcount);
715 INT_SET(*bt_ptr, ARCH_CONVERT, btree_curs->level[level-1].agbno);
716 }
717
718 /*
719 * rebuilds a freespace tree given a cursor and the magic number of the
720 * type of tree to build (bno or bcnt).  returns the number of free blocks
721 * represented by the tree.
722 */
723 xfs_extlen_t
724 build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
725 bt_status_t *btree_curs, __uint32_t magic)
726 {
727 xfs_agnumber_t i;
728 xfs_agblock_t j;
729 xfs_alloc_block_t *bt_hdr;
730 xfs_alloc_rec_t *bt_rec;
731 int level;
732 xfs_agblock_t agbno;
733 extent_tree_node_t *ext_ptr;
734 bt_stat_level_t *lptr;
735 xfs_extlen_t freeblks;
736
737 #ifdef XR_BLD_FREE_TRACE
738 fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
739 #endif
740 level = btree_curs->num_levels;
741 freeblks = 0;
742
743 ASSERT(level > 0);
744
745 /*
746 * initialize the first block on each btree level
747 */
748 for (i = 0; i < level; i++) {
749 lptr = &btree_curs->level[i];
750
751 agbno = get_next_blockaddr(agno, i, btree_curs);
752 lptr->buf_p = libxfs_getbuf(mp->m_dev,
753 XFS_AGB_TO_DADDR(mp, agno, agbno),
754 XFS_FSB_TO_BB(mp, 1));
755
756 if (i == btree_curs->num_levels - 1)
757 btree_curs->root = agbno;
758
759 lptr->agbno = agbno;
760 lptr->prev_agbno = NULLAGBLOCK;
761 lptr->prev_buf_p = NULL;
762 /*
763 * initialize block header
764 */
765 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
766 bzero(bt_hdr, mp->m_sb.sb_blocksize);
767
768 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
769 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, i);
770 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
771 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
772 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
773 }
774 /*
775 * run along leaf, setting up records. as we have to switch
776 * blocks, call the prop_freespace_cursor routine to set up the new
777 * pointers for the parent. that can recurse up to the root
778 * if required. set the sibling pointers for leaf level here.
779 */
780 if (magic == XFS_ABTB_MAGIC)
781 ext_ptr = findfirst_bno_extent(agno);
782 else
783 ext_ptr = findfirst_bcnt_extent(agno);
784
785 #ifdef XR_BLD_FREE_TRACE
786 fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
787 agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
788 #endif
789
790 lptr = &btree_curs->level[0];
791
792 for (i = 0; i < btree_curs->level[0].num_blocks; i++) {
793 /*
794 * block initialization, lay in block header
795 */
796 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
797 bzero(bt_hdr, mp->m_sb.sb_blocksize);
798
799 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
800 INT_ZERO(bt_hdr->bb_level, ARCH_CONVERT);
801 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
802 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
803 INT_SET(bt_hdr->bb_numrecs, ARCH_CONVERT,
804 lptr->num_recs_pb + (lptr->modulo > 0));
805 #ifdef XR_BLD_FREE_TRACE
806 fprintf(stderr, "bft, bb_numrecs = %d\n",
807 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
808 #endif
809
810 if (lptr->modulo > 0)
811 lptr->modulo--;
812
813 /*
814 * initialize values in the path up to the root if
815 * this is a multi-level btree
816 */
817 if (btree_curs->num_levels > 1)
818 prop_freespace_cursor(mp, agno, btree_curs,
819 ext_ptr->ex_startblock,
820 ext_ptr->ex_blockcount,
821 0, magic);
822
823 bt_rec = (xfs_alloc_rec_t *) ((char *) bt_hdr +
824 sizeof(xfs_alloc_block_t));
825 for (j = 0; j < INT_GET(bt_hdr->bb_numrecs,ARCH_CONVERT); j++) {
826 ASSERT(ext_ptr != NULL);
827 INT_SET(bt_rec[j].ar_startblock, ARCH_CONVERT,
828 ext_ptr->ex_startblock);
829 INT_SET(bt_rec[j].ar_blockcount, ARCH_CONVERT,
830 ext_ptr->ex_blockcount);
831 freeblks += ext_ptr->ex_blockcount;
832 if (magic == XFS_ABTB_MAGIC)
833 ext_ptr = findnext_bno_extent(ext_ptr);
834 else
835 ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
836 #if 0
837 #ifdef XR_BLD_FREE_TRACE
838 if (ext_ptr == NULL)
839 fprintf(stderr, "null extent pointer, j = %d\n",
840 j);
841 else
842 fprintf(stderr,
843 "bft, agno = %d, start = %u, count = %u\n",
844 agno, ext_ptr->ex_startblock,
845 ext_ptr->ex_blockcount);
846 #endif
847 #endif
848 }
849
850 if (ext_ptr != NULL) {
851 /*
852 * get next leaf level block
853 */
854 if (lptr->prev_buf_p != NULL) {
855 #ifdef XR_BLD_FREE_TRACE
856 fprintf(stderr, " writing fst agbno %u\n",
857 lptr->prev_agbno);
858 #endif
859 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
860 libxfs_writebuf(lptr->prev_buf_p, 0);
861 }
862 lptr->prev_buf_p = lptr->buf_p;
863 lptr->prev_agbno = lptr->agbno;
864
865 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, lptr->agbno =
866 get_next_blockaddr(agno, 0, btree_curs));
867
868 lptr->buf_p = libxfs_getbuf(mp->m_dev,
869 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
870 XFS_FSB_TO_BB(mp, 1));
871 }
872 }
873
874 return(freeblks);
875 }
876
877 /*
878 * no-cursor versions of the XFS equivalents. The address calculators
879 * should be used only for interior btree nodes.
880 * these are adapted from xfs_ialloc_btree.h and xfs_btree.h
881 */
882 #define XR_INOBT_KEY_ADDR(mp, bp, i) \
883 (xfs_inobt_key_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
884 + ((i)-1) * sizeof(xfs_inobt_key_t))
885
886 #define XR_INOBT_PTR_ADDR(mp, bp, i) \
887 (xfs_inobt_ptr_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
888 + (mp)->m_inobt_mxr[1] * sizeof(xfs_inobt_key_t) \
889 + ((i)-1) * sizeof(xfs_inobt_ptr_t))
890
891 #define XR_INOBT_BLOCK_MAXRECS(mp, level) \
892 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
893 xfs_inobt, (level) == 0)
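
/*
 * for comparison with the freespace trees (again assuming a 16-byte
 * block header and 4k blocks): an inobt leaf holds roughly
 * (4096 - 16) / sizeof(xfs_inobt_rec_t) = 255 records and an interior
 * node (4096 - 16) / (key + ptr) = 510 entries.
 */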
894
895 /*
896 * we don't have to worry here about how chewing up free extents
897 * may perturb things because inode tree building happens before
898 * freespace tree building.
899 */
900 void
901 init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
902 __uint64_t *num_inos, __uint64_t *num_free_inos)
903 {
904 __uint64_t ninos;
905 __uint64_t nfinos;
906 ino_tree_node_t *ino_rec;
907 int num_recs;
908 int level;
909 bt_stat_level_t *lptr;
910 bt_stat_level_t *p_lptr;
911 xfs_extlen_t blocks_allocated;
912 int i;
913
914 *num_inos = *num_free_inos = 0;
915 ninos = nfinos = 0;
916
917 lptr = &btree_curs->level[0];
918 btree_curs->init = 1;
919
920 if ((ino_rec = findfirst_inode_rec(agno)) == NULL) {
921 /*
922 * easy corner-case -- no inode records
923 */
924 lptr->num_blocks = 1;
925 lptr->modulo = 0;
926 lptr->num_recs_pb = 0;
927 lptr->num_recs_tot = 0;
928
929 btree_curs->num_levels = 1;
930 btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;
931
932 setup_cursor(mp, agno, btree_curs);
933
934 return;
935 }
936
937 /*
938 * build up statistics
939 */
940 for (num_recs = 0; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec)) {
941 ninos += XFS_INODES_PER_CHUNK;
942 num_recs++;
943 for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
944 ASSERT(is_inode_confirmed(ino_rec, i));
945 if (is_inode_free(ino_rec, i))
946 nfinos++;
947 }
948 }
949
950 blocks_allocated = lptr->num_blocks = howmany(num_recs,
951 XR_INOBT_BLOCK_MAXRECS(mp, 0));
952
953 lptr->modulo = num_recs % lptr->num_blocks;
954 lptr->num_recs_pb = num_recs / lptr->num_blocks;
955 lptr->num_recs_tot = num_recs;
956 level = 1;
957
958 if (lptr->num_blocks > 1) {
959 for (; btree_curs->level[level-1].num_blocks > 1
960 && level < XFS_BTREE_MAXLEVELS;
961 level++) {
962 lptr = &btree_curs->level[level];
963 p_lptr = &btree_curs->level[level - 1];
964 lptr->num_blocks = howmany(p_lptr->num_blocks,
965 XR_INOBT_BLOCK_MAXRECS(mp, level));
966 lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
967 lptr->num_recs_pb = p_lptr->num_blocks
968 / lptr->num_blocks;
969 lptr->num_recs_tot = p_lptr->num_blocks;
970
971 blocks_allocated += lptr->num_blocks;
972 }
973 }
974 ASSERT(lptr->num_blocks == 1);
975 btree_curs->num_levels = level;
976
977 btree_curs->num_tot_blocks = btree_curs->num_free_blocks
978 = blocks_allocated;
979
980 setup_cursor(mp, agno, btree_curs);
981
982 *num_inos = ninos;
983 *num_free_inos = nfinos;
984
985 return;
986 }
987
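/*
 * inode btree counterpart of prop_freespace_cursor() -- pushes the
 * starting inode number of a newly started child block into the
 * parent node, splitting parent blocks and recursing toward the root
 * as they fill.
 */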
988 void
989 prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
990 xfs_agino_t startino, int level)
991 {
992 xfs_inobt_block_t *bt_hdr;
993 xfs_inobt_key_t *bt_key;
994 xfs_inobt_ptr_t *bt_ptr;
995 xfs_agblock_t agbno;
996 bt_stat_level_t *lptr;
997
998 level++;
999
1000 if (level >= btree_curs->num_levels)
1001 return;
1002
1003 lptr = &btree_curs->level[level];
1004 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1005
1006 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) == 0) {
1007 /*
1008 * this only happens once to initialize the
1009 * first path up the left side of the tree
1010 * where the agbno's are already set up
1011 */
1012 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1013 }
1014
1015 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) ==
1016 lptr->num_recs_pb + (lptr->modulo > 0)) {
1017 /*
1018 * write out current prev block, grab us a new block,
1019 * and set the rightsib pointer of current block
1020 */
1021 #ifdef XR_BLD_INO_TRACE
1022 fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
1023 #endif
1024 if (lptr->prev_agbno != NULLAGBLOCK) {
1025 ASSERT(lptr->prev_buf_p != NULL);
1026 libxfs_writebuf(lptr->prev_buf_p, 0);
1027 }
1028 lptr->prev_agbno = lptr->agbno;
1029 lptr->prev_buf_p = lptr->buf_p;
1030 agbno = get_next_blockaddr(agno, level, btree_curs);
1031
1032 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, agbno);
1033
1034 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1035 XFS_AGB_TO_DADDR(mp, agno, agbno),
1036 XFS_FSB_TO_BB(mp, 1));
1037 lptr->agbno = agbno;
1038
1039 if (lptr->modulo)
1040 lptr->modulo--;
1041
1042 /*
1043 * initialize block header
1044 */
1045 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1046 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1047
1048 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1049 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, level);
1050 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
1051 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
1052 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
1053 /*
1054 * propagate extent record for first extent in new block up
1055 */
1056 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1057 }
1058 /*
1059 * add inode info to current block
1060 */
1061 INT_MOD(bt_hdr->bb_numrecs, ARCH_CONVERT, +1);
1062
1063 bt_key = XR_INOBT_KEY_ADDR(mp, bt_hdr,
1064 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
1065 bt_ptr = XR_INOBT_PTR_ADDR(mp, bt_hdr,
1066 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
1067
1068 INT_SET(bt_key->ir_startino, ARCH_CONVERT, startino);
1069 INT_SET(*bt_ptr, ARCH_CONVERT, btree_curs->level[level-1].agbno);
1070 }
1071
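/*
 * fill in and write out the AGI for this ag: ag length, inode btree
 * root and level, inode/free-inode counts, and empty unlinked-inode
 * hash buckets.
 */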
1072 void
1073 build_agi(xfs_mount_t *mp, xfs_agnumber_t agno,
1074 bt_status_t *btree_curs, xfs_agino_t first_agino,
1075 xfs_agino_t count, xfs_agino_t freecount)
1076 {
1077 xfs_buf_t *agi_buf;
1078 xfs_agi_t *agi;
1079 int i;
1080
1081 agi_buf = libxfs_getbuf(mp->m_dev,
1082 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR),
1083 mp->m_sb.sb_sectsize/BBSIZE);
1084 agi = XFS_BUF_TO_AGI(agi_buf);
1085 bzero(agi, mp->m_sb.sb_sectsize);
1086
1087 INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC);
1088 INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION);
1089 INT_SET(agi->agi_seqno, ARCH_CONVERT, agno);
1090 if (agno < mp->m_sb.sb_agcount - 1)
1091 INT_SET(agi->agi_length, ARCH_CONVERT, mp->m_sb.sb_agblocks);
1092 else
1093 INT_SET(agi->agi_length, ARCH_CONVERT, mp->m_sb.sb_dblocks -
1094 (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno);
1095 INT_SET(agi->agi_count, ARCH_CONVERT, count);
1096 INT_SET(agi->agi_root, ARCH_CONVERT, btree_curs->root);
1097 INT_SET(agi->agi_level, ARCH_CONVERT, btree_curs->num_levels);
1098 INT_SET(agi->agi_freecount, ARCH_CONVERT, freecount);
1099 INT_SET(agi->agi_newino, ARCH_CONVERT, first_agino);
1100 INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO);
1101
1102 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
1103 INT_SET(agi->agi_unlinked[i], ARCH_CONVERT, NULLAGINO);
1104 }
1105
1106 libxfs_writebuf(agi_buf, 0);
1107 }
1108
1109 /*
1110 * rebuilds an inode tree given a cursor. We're lazy here and call
1111 * the routine that builds the agi
1112 */
1113 void
1114 build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
1115 bt_status_t *btree_curs)
1116 {
1117 xfs_agnumber_t i;
1118 xfs_agblock_t j;
1119 xfs_agblock_t agbno;
1120 xfs_agino_t first_agino;
1121 xfs_inobt_block_t *bt_hdr;
1122 xfs_inobt_rec_t *bt_rec;
1123 ino_tree_node_t *ino_rec;
1124 bt_stat_level_t *lptr;
1125 xfs_agino_t count = 0;
1126 xfs_agino_t freecount = 0;
1127 int inocnt;
1128 int k;
1129 int level = btree_curs->num_levels;
1130
1131 for (i = 0; i < level; i++) {
1132 lptr = &btree_curs->level[i];
1133
1134 agbno = get_next_blockaddr(agno, i, btree_curs);
1135 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1136 XFS_AGB_TO_DADDR(mp, agno, agbno),
1137 XFS_FSB_TO_BB(mp, 1));
1138
1139 if (i == btree_curs->num_levels - 1)
1140 btree_curs->root = agbno;
1141
1142 lptr->agbno = agbno;
1143 lptr->prev_agbno = NULLAGBLOCK;
1144 lptr->prev_buf_p = NULL;
1145 /*
1146 * initialize block header
1147 */
1148 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1149 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1150
1151 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1152 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, i);
1153 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
1154 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
1155 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
1156 }
1157 /*
1158 * run along leaf, setting up records. as we have to switch
1159 * blocks, call the prop_ino_cursor routine to set up the new
1160 * pointers for the parent. that can recurse up to the root
1161 * if required. set the sibling pointers for leaf level here.
1162 */
1163 ino_rec = findfirst_inode_rec(agno);
1164
1165 if (ino_rec != NULL)
1166 first_agino = ino_rec->ino_startnum;
1167 else
1168 first_agino = NULLAGINO;
1169
1170 lptr = &btree_curs->level[0];
1171
1172 for (i = 0; i < lptr->num_blocks; i++) {
1173 /*
1174 * block initialization, lay in block header
1175 */
1176 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1177 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1178
1179 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1180 INT_ZERO(bt_hdr->bb_level, ARCH_CONVERT);
1181 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
1182 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
1183 INT_SET(bt_hdr->bb_numrecs, ARCH_CONVERT,
1184 lptr->num_recs_pb + (lptr->modulo > 0));
1185
1186 if (lptr->modulo > 0)
1187 lptr->modulo--;
1188
1189 if (lptr->num_recs_pb > 0)
1190 prop_ino_cursor(mp, agno, btree_curs,
1191 ino_rec->ino_startnum, 0);
1192
1193 bt_rec = (xfs_inobt_rec_t *) ((char *) bt_hdr +
1194 sizeof(xfs_inobt_block_t));
1195 for (j = 0; j < INT_GET(bt_hdr->bb_numrecs,ARCH_CONVERT); j++) {
1196 ASSERT(ino_rec != NULL);
1197 INT_SET(bt_rec[j].ir_startino, ARCH_CONVERT,
1198 ino_rec->ino_startnum);
1199 INT_SET(bt_rec[j].ir_free, ARCH_CONVERT,
1200 ino_rec->ir_free);
1201
1202 inocnt = 0;
1203 for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++) {
1204 ASSERT(is_inode_confirmed(ino_rec, k));
1205 inocnt += is_inode_free(ino_rec, k);
1206 }
1207
1208 INT_SET(bt_rec[j].ir_freecount, ARCH_CONVERT, inocnt);
1209 freecount += inocnt;
1210 count += XFS_INODES_PER_CHUNK;
1211 ino_rec = next_ino_rec(ino_rec);
1212 }
1213
1214 if (ino_rec != NULL) {
1215 /*
1216 * get next leaf level block
1217 */
1218 if (lptr->prev_buf_p != NULL) {
1219 #ifdef XR_BLD_INO_TRACE
1220 fprintf(stderr, "writing inobt agbno %u\n",
1221 lptr->prev_agbno);
1222 #endif
1223 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
1224 libxfs_writebuf(lptr->prev_buf_p, 0);
1225 }
1226 lptr->prev_buf_p = lptr->buf_p;
1227 lptr->prev_agbno = lptr->agbno;
1228
1229 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, lptr->agbno=
1230 get_next_blockaddr(agno, 0, btree_curs));
1231
1232 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1233 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
1234 XFS_FSB_TO_BB(mp, 1));
1235 }
1236 }
1237
1238 build_agi(mp, agno, btree_curs, first_agino, count, freecount);
1239 }
1240
1241 /*
1242 * build both the agf and the agfl for an agno given both
1243 * btree cursors
1244 */
1245 void
1246 build_agf_agfl(xfs_mount_t *mp,
1247 xfs_agnumber_t agno,
1248 bt_status_t *bno_bt,
1249 bt_status_t *bcnt_bt,
1250 xfs_extlen_t freeblks, /* # free blocks in tree */
1251 int lostblocks) /* # blocks that will be lost */
1252 {
1253 extent_tree_node_t *ext_ptr;
1254 xfs_buf_t *agf_buf, *agfl_buf;
1255 int i;
1256 int j;
1257 xfs_agfl_t *agfl;
1258 xfs_agf_t *agf;
1259
1260 agf_buf = libxfs_getbuf(mp->m_dev,
1261 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR),
1262 mp->m_sb.sb_sectsize/BBSIZE);
1263 agf = XFS_BUF_TO_AGF(agf_buf);
1264 bzero(agf, mp->m_sb.sb_sectsize);
1265
1266 #ifdef XR_BLD_FREE_TRACE
1267 fprintf(stderr, "agf = 0x%x, agf_buf->b_un.b_addr = 0x%x\n",
1268 (__psint_t) agf, (__psint_t) agf_buf->b_un.b_addr);
1269 #endif
1270
1271 /*
1272 * set up fixed part of agf
1273 */
1274 INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC);
1275 INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION);
1276 INT_SET(agf->agf_seqno, ARCH_CONVERT, agno);
1277
1278 if (agno < mp->m_sb.sb_agcount - 1)
1279 INT_SET(agf->agf_length, ARCH_CONVERT, mp->m_sb.sb_agblocks);
1280 else
1281 INT_SET(agf->agf_length, ARCH_CONVERT, mp->m_sb.sb_dblocks -
1282 (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno);
1283
1284 INT_SET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT, bno_bt->root);
1285 INT_SET(agf->agf_levels[XFS_BTNUM_BNO], ARCH_CONVERT,
1286 bno_bt->num_levels);
1287 INT_SET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT, bcnt_bt->root);
1288 INT_SET(agf->agf_levels[XFS_BTNUM_CNT], ARCH_CONVERT,
1289 bcnt_bt->num_levels);
1290 INT_SET(agf->agf_freeblks, ARCH_CONVERT, freeblks);
1291
1292 #ifdef XR_BLD_FREE_TRACE
1293 fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
1294 INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT),
1295 INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT),
1296 XFS_BTNUM_BNO,
1297 XFS_BTNUM_CNT);
1298 #endif
1299
1300 /*
1301 * do we have left-over blocks in the btree cursors that should
1302 * be used to fill the AGFL?
1303 */
1304 if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
1305 /*
1306 * yes - grab the AGFL buffer
1307 */
1308 agfl_buf = libxfs_getbuf(mp->m_dev,
1309 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR),
1310 mp->m_sb.sb_sectsize/BBSIZE);
1311 agfl = XFS_BUF_TO_AGFL(agfl_buf);
1312 bzero(agfl, mp->m_sb.sb_sectsize);
1313 /*
1314 * ok, now grab as many blocks as we can
1315 */
1316 i = j = 0;
1317 while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE) {
1318 INT_SET(agfl->agfl_bno[i], ARCH_CONVERT,
1319 get_next_blockaddr(agno, 0, bno_bt));
1320 i++;
1321 }
1322
1323 while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE) {
1324 INT_SET(agfl->agfl_bno[i], ARCH_CONVERT,
1325 get_next_blockaddr(agno, 0, bcnt_bt));
1326 i++;
1327 }
1328 /*
1329 * now throw the rest of the blocks away and complain
1330 */
1331 while (bno_bt->num_free_blocks > 0) {
1332 (void) get_next_blockaddr(agno, 0, bno_bt);
1333 j++;
1334 }
1335 while (bcnt_bt->num_free_blocks > 0) {
1336 (void) get_next_blockaddr(agno, 0, bcnt_bt);
1337 j++;
1338 }
1339
1340 if (j > 0) {
1341 if (j == lostblocks)
1342 do_warn("lost %d blocks in ag %u\n", j, agno);
1343 else
1344 do_warn("thought we were going to lose %d "
1345 "blocks in ag %u, actually lost %d\n",
1346 lostblocks, j, agno);
1347 }
1348
1349 INT_ZERO(agf->agf_flfirst, ARCH_CONVERT);
1350 INT_SET(agf->agf_fllast, ARCH_CONVERT, i - 1);
1351 INT_SET(agf->agf_flcount, ARCH_CONVERT, i);
1352
1353 #ifdef XR_BLD_FREE_TRACE
1354 fprintf(stderr, "writing agfl for ag %u\n", agno);
1355 #endif
1356
1357 libxfs_writebuf(agfl_buf, 0);
1358 } else {
1359 INT_ZERO(agf->agf_flfirst, ARCH_CONVERT);
1360 INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE - 1);
1361 INT_ZERO(agf->agf_flcount, ARCH_CONVERT);
1362 }
1363
1364 ext_ptr = findbiggest_bcnt_extent(agno);
1365 INT_SET(agf->agf_longest, ARCH_CONVERT,
1366 (ext_ptr != NULL) ? ext_ptr->ex_blockcount : 0);
1367
1368 ASSERT(INT_GET(agf->agf_roots[XFS_BTNUM_BNOi], ARCH_CONVERT) !=
1369 INT_GET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT));
1370
1371 libxfs_writebuf(agf_buf, 0);
1372
1373 #ifdef XR_BLD_FREE_TRACE
1374 fprintf(stderr, "wrote agf for ag %u\n", agno);
1375 #endif
1376 }
1377
1378 /*
1379 * update the superblock counters, sync the sb version numbers and
1380 * feature bits to the filesystem, and sync up the on-disk superblock
1381 * to match the incore superblock.
1382 */
1383 void
1384 sync_sb(xfs_mount_t *mp)
1385 {
1386 xfs_sb_t *sbp;
1387 xfs_buf_t *bp;
1388
1389 bp = libxfs_getsb(mp, 0);
1390 if (!bp)
1391 do_error("couldn't get superblock\n");
1392
1393 sbp = XFS_BUF_TO_SBP(bp);
1394
1395 mp->m_sb.sb_icount = sb_icount;
1396 mp->m_sb.sb_ifree = sb_ifree;
1397 mp->m_sb.sb_fdblocks = sb_fdblocks;
1398 mp->m_sb.sb_frextents = sb_frextents;
1399
1400 update_sb_version(mp);
1401
1402 *sbp = mp->m_sb;
1403 libxfs_xlate_sb(XFS_BUF_PTR(bp), sbp, -1, ARCH_CONVERT,
1404 XFS_SB_ALL_BITS);
1405 libxfs_writebuf(bp, 0);
1406 }
1407
1408 /*
1409 * make sure the root and realtime inodes show up allocated
1410 * even if they've been freed. they get reinitialized in phase6.
1411 */
1412 void
1413 keep_fsinos(xfs_mount_t *mp)
1414 {
1415 ino_tree_node_t *irec;
1416 int i;
1417
1418 irec = find_inode_rec(XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
1419 XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
1420
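/*
 * mark the first three inodes of the chunk in use -- the root inode
 * plus (presumably) the realtime bitmap and summary inodes, which
 * mkfs allocates immediately after the root.
 */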
1421 for (i = 0; i < 3; i++)
1422 set_inode_used(irec, i);
1423 }
1424
1425 void
1426 phase5(xfs_mount_t *mp)
1427 {
1428 __uint64_t num_inos;
1429 __uint64_t num_free_inos;
1430 bt_status_t bno_btree_curs;
1431 bt_status_t bcnt_btree_curs;
1432 bt_status_t ino_btree_curs;
1433 xfs_agnumber_t agno;
1434 int extra_blocks = 0;
1435 uint num_freeblocks;
1436 xfs_extlen_t freeblks1;
1437 xfs_extlen_t freeblks2;
1438 xfs_agblock_t num_extents;
1439 extern int count_bno_extents(xfs_agnumber_t);
1440 extern int count_bno_extents_blocks(xfs_agnumber_t, uint *);
1441 #ifdef XR_BLD_FREE_TRACE
1442 extern int count_bcnt_extents(xfs_agnumber_t);
1443 #endif
1444
1445 do_log("Phase 5 - rebuild AG headers and trees...\n");
1446
1447 #ifdef XR_BLD_FREE_TRACE
1448 fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
1449 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0),
1450 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0)
1451 );
1452 fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1453 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1),
1454 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1)
1455 );
1456 fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
1457 XR_INOBT_BLOCK_MAXRECS(mp, 0));
1458 fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
1459 XR_INOBT_BLOCK_MAXRECS(mp, 1));
1460 fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
1461 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0),
1462 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0));
1463 fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1464 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1),
1465 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1));
1466 #endif
1467
1468 /*
1469 * make sure the root and realtime inodes show up allocated
1470 */
1471 keep_fsinos(mp);
1472
1473 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
1474 /*
1475 * build up incore bno and bcnt extent btrees
1476 */
1477 num_extents = mk_incore_fstree(mp, agno);
1478
1479 #ifdef XR_BLD_FREE_TRACE
1480 fprintf(stderr, "# of bno extents is %d\n",
1481 count_bno_extents(agno));
1482 #endif
1483
1484 if (num_extents == 0) {
1485 /*
1486 * XXX - what we probably should do here is pick an
1487 * inode for a regular file in the allocation group
1488 * that has space allocated and shoot it by traversing
1489 * the bmap list and putting all its extents on the
1490 * incore freespace trees, clearing the inode,
1491 * and clearing the in-use bit in the incore inode
1492 * tree. Then try mk_incore_fstree() again.
1493 */
1494 do_error("unable to rebuild AG %u. "
1495 "Not enough free space in on-disk AG.\n", agno);
1496 }
1497
1498 /*
1499 * done with the AG bitmap, toss it...
1500 */
1501 teardown_ag_bmap(mp, agno);
1502
1503 /*
1504 * ok, now set up the btree cursors for the
1505 * on-disk btrees (includes pre-allocating all
1506 * required blocks for the trees themselves)
1507 */
1508 init_ino_cursor(mp, agno, &ino_btree_curs,
1509 &num_inos, &num_free_inos);
1510
1511 sb_icount += num_inos;
1512 sb_ifree += num_free_inos;
1513
1514 num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
1515 /*
1516 * lose two blocks per AG -- the space tree roots
1517 * are counted as allocated since the space trees
1518 * always have roots
1519 */
1520 sb_fdblocks += num_freeblocks - 2;
1521
1522 if (num_extents == 0) {
1523 /*
1524 * XXX - what we probably should do here is pick an
1525 * inode for a regular file in the allocation group
1526 * that has space allocated and shoot it by traversing
1527 * the bmap list and putting all its extents on the
1528 * incore freespace trees, clearing the inode,
1529 * and clearing the in-use bit in the incore inode
1530 * tree. Then try mk_incore_fstree() again.
1531 */
1532 do_error("unable to rebuild AG %u. No free space.\n",
1533 agno);
1534 exit(1);
1535 }
1536
1537 #ifdef XR_BLD_FREE_TRACE
1538 fprintf(stderr, "# of bno extents is %d\n", num_extents);
1539 #endif
1540
1541 /*
1542 * track blocks that we might really lose
1543 */
1544 extra_blocks = calculate_freespace_cursor(mp, agno,
1545 &num_extents, &bno_btree_curs);
1546
1547 /*
1548 * freespace btrees live in the "free space" but
1549 * the filesystem treats AGFL blocks as allocated
1550 * since they aren't described by the freespace trees
1551 */
1552
1553 /*
1554 * see if we can fit all the extra blocks into the AGFL
1555 */
1556 extra_blocks = (extra_blocks - XFS_AGFL_SIZE > 0)
1557 ? extra_blocks - XFS_AGFL_SIZE
1558 : 0;
1559
1560 if (extra_blocks > 0) {
1561 do_warn("lost %d blocks in agno %d, sorry.\n",
1562 extra_blocks, agno);
1563 sb_fdblocks -= extra_blocks;
1564 }
1565
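/*
 * the structure copy below gives the by-size (bcnt) cursor the same
 * geometry as the by-block (bno) cursor; setup_cursor() then
 * allocates a separate block list for each of them.
 */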
1566 bcnt_btree_curs = bno_btree_curs;
1567
1568 setup_cursor(mp, agno, &bno_btree_curs);
1569 setup_cursor(mp, agno, &bcnt_btree_curs);
1570
1571 #ifdef XR_BLD_FREE_TRACE
1572 fprintf(stderr, "# of bno extents is %d\n",
1573 count_bno_extents(agno));
1574 fprintf(stderr, "# of bcnt extents is %d\n",
1575 count_bcnt_extents(agno));
1576 #endif
1577 /*
1578 * now rebuild the freespace trees
1579 */
1580 freeblks1 = build_freespace_tree(mp, agno, &bno_btree_curs,
1581 XFS_ABTB_MAGIC);
1582 #ifdef XR_BLD_FREE_TRACE
1583 fprintf(stderr, "# of free blocks == %d\n", freeblks1);
1584 #endif
1585 write_cursor(&bno_btree_curs);
1586
1587 freeblks2 = build_freespace_tree(mp, agno, &bcnt_btree_curs,
1588 XFS_ABTC_MAGIC);
1589 write_cursor(&bcnt_btree_curs);
1590
1591 ASSERT(freeblks1 == freeblks2);
1592
1593 /*
1594 * set up agf and agfl
1595 */
1596 build_agf_agfl(mp, agno, &bno_btree_curs,
1597 &bcnt_btree_curs, freeblks1, extra_blocks);
1598 /*
1599 * build inode allocation tree.  this also builds the agi
1600 */
1601 build_ino_tree(mp, agno, &ino_btree_curs);
1602 write_cursor(&ino_btree_curs);
1603 /*
1604 * tear down cursors
1605 */
1606 finish_cursor(&bno_btree_curs);
1607 finish_cursor(&ino_btree_curs);
1608 finish_cursor(&bcnt_btree_curs);
1609 /*
1610 * release the incore per-AG bno/bcnt trees so
1611 * the extent nodes can be recycled
1612 */
1613 release_agbno_extent_tree(agno);
1614 release_agbcnt_extent_tree(agno);
1615 }
1616
1617 if (mp->m_sb.sb_rblocks) {
1618 do_log(
1619 " - generate realtime summary info and bitmap...\n");
1620 rtinit(mp);
1621 generate_rtinfo(mp, btmcompute, sumcompute);
1622 teardown_rt_bmap(mp);
1623 }
1624
1625 do_log(" - reset superblock...\n");
1626
1627 /*
1628 * sync superblock counter and set version bits correctly
1629 */
1630 sync_sb(mp);
1631
1632 bad_ino_btree = 0;
1633 }