1 /*
2 * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33 #include <libxfs.h>
34 #include "avl.h"
35 #include "globals.h"
36 #include "agheader.h"
37 #include "incore.h"
38 #include "protos.h"
39 #include "err_protos.h"
40 #include "dinode.h"
41 #include "rt.h"
42 #include "versions.h"
43
44 /*
45 * we maintain the current slice (path from root to leaf)
46 * of the btree incore. when we need a new block, we ask
47 * the block allocator for the address of a block on that
48 * level, map the block in, and set up the appropriate
49 * pointers (child, sibling, etc.) and keys that should
50 * point to the new block.
51 */
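/*
 * a rough picture (illustrative only) of a three-level cursor while
 * the leaf level is being filled left to right:
 *
 *                      [ root ]                     level 2
 *               [ node ] -> [ node ]                level 1
 *      [ leaf ] -> [ leaf ] -> [ leaf ] -> ...      level 0
 *
 * each level keeps a buffer for the block currently being filled plus
 * the previously filled block, which can't be written out until its
 * right-sibling pointer is known.
 */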
52 typedef struct bt_stat_level {
53 /*
54 * set in setup_cursor routine and maintained in the tree-building
55 * routines
56 */
57 xfs_buf_t *buf_p; /* buffer for current block */
58 xfs_buf_t *prev_buf_p; /* buffer for previous block */
59 xfs_agblock_t agbno; /* current block being filled */
60 xfs_agblock_t prev_agbno; /* previous block */
61 /*
62 * set in calculate/init cursor routines for each btree level
63 */
64 int num_recs_tot; /* # tree recs in level */
65 int num_blocks; /* # tree blocks in level */
66 int num_recs_pb; /* num_recs_tot / num_blocks */
67 int modulo; /* num_recs_tot % num_blocks */
68 } bt_stat_level_t;
69
70 typedef struct bt_status {
71 int init; /* cursor set up once? */
72 int num_levels; /* # of levels in btree */
73 xfs_extlen_t num_tot_blocks; /* # blocks alloc'ed for tree */
74 xfs_extlen_t num_free_blocks;/* # blocks currently unused */
75
76 xfs_agblock_t root; /* root block */
77 /*
78 * list of blocks to be used to set up this tree
79 * and pointer to the first unused block on the list
80 */
81 xfs_agblock_t *btree_blocks; /* block list */
82 xfs_agblock_t *free_btree_blocks; /* first unused block */
83 /*
84 * per-level status info
85 */
86 bt_stat_level_t level[XFS_BTREE_MAXLEVELS];
87 } bt_status_t;
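/*
 * worked example of the per-level geometry (numbers are illustrative
 * only): if a level holds num_recs_tot = 1000 records and a
 * hypothetical 168 records fit per block, then
 *
 *	num_blocks  = howmany(1000, 168) = 6
 *	num_recs_pb = 1000 / 6 = 166
 *	modulo      = 1000 % 6 = 4
 *
 * so the first 4 (modulo) blocks each get num_recs_pb + 1 = 167
 * records and the remaining 2 get 166:  4 * 167 + 2 * 166 = 1000.
 */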
88
89
90 int
91 mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
92 {
93 int in_extent;
94 int num_extents;
95 xfs_agblock_t extent_start;
96 xfs_extlen_t extent_len;
97 xfs_agblock_t agbno;
98 xfs_agblock_t ag_end;
99 uint free_blocks;
100 #ifdef XR_BLD_FREE_TRACE
101 int old_state;
102 int state = XR_E_BAD_STATE;
103 #endif
104
105 /*
106 * scan the bitmap for the ag looking for contiguous
107 * extents of free blocks. At this point, we know
108 * that blocks in the bitmap are either set to an
109 * "in use" state or set to unknown (0) since the
110 * bmaps were bzero'ed in phase 4 and only blocks
111 * being used by inodes, inode bmaps, ag headers,
112 * and the files themselves were put into the bitmap.
113 *
114 */
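	/*
	 * for example (illustrative only), if the first eight block
	 * states in this ag were
	 *
	 *	agbno:  0  1  2  3  4  5  6  7
	 *	state:  U  U  F  F  F  U  F  U    (U = in use, F = free/unknown)
	 *
	 * the loop below would add the extents [2, 3] and [6, 1] to the
	 * incore bno and bcnt trees and return num_extents = 2.
	 */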
115 ASSERT(agno < mp->m_sb.sb_agcount);
116
117 extent_start = extent_len = 0;
118 in_extent = 0;
119 num_extents = free_blocks = 0;
120
121 if (agno < mp->m_sb.sb_agcount - 1)
122 ag_end = mp->m_sb.sb_agblocks;
123 else
124 ag_end = mp->m_sb.sb_dblocks -
125 mp->m_sb.sb_agblocks * (mp->m_sb.sb_agcount - 1);
126
127 /*
128 * ok, now find the number of extents, keep track of the
129 * largest extent.
130 */
131 for (agbno = 0; agbno < ag_end; agbno++) {
132 #if 0
133 old_state = state;
134 state = get_agbno_state(mp, agno, agbno);
135 if (state != old_state) {
136 fprintf(stderr, "agbno %u - new state is %d\n",
137 agbno, state);
138 }
139 #endif
140 if (get_agbno_state(mp, agno, agbno) < XR_E_INUSE) {
141 free_blocks++;
142 if (in_extent == 0) {
143 /*
144 * found the start of a free extent
145 */
146 in_extent = 1;
147 num_extents++;
148 extent_start = agbno;
149 extent_len = 1;
150 } else {
151 extent_len++;
152 }
153 } else {
154 if (in_extent) {
155 /*
156 * free extent ends here, add extent to the
157 * 2 incore extent (avl-to-be-B+) trees
158 */
159 in_extent = 0;
160 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
161 fprintf(stderr, "adding extent %u [%u %u]\n",
162 agno, extent_start, extent_len);
163 #endif
164 add_bno_extent(agno, extent_start, extent_len);
165 add_bcnt_extent(agno, extent_start, extent_len);
166 }
167 }
168 }
169 if (in_extent) {
170 /*
171 * free extent ends here
172 */
173 in_extent = 0;
174 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
175 fprintf(stderr, "adding extent %u [%u %u]\n",
176 agno, extent_start, extent_len);
177 #endif
178 add_bno_extent(agno, extent_start, extent_len);
179 add_bcnt_extent(agno, extent_start, extent_len);
180 }
181
182 return(num_extents);
183 }
184
185 /* ARGSUSED */
186 xfs_agblock_t
187 get_next_blockaddr(xfs_agnumber_t agno, int level, bt_status_t *curs)
188 {
189 ASSERT(curs->free_btree_blocks < curs->btree_blocks +
190 curs->num_tot_blocks);
191 ASSERT(curs->num_free_blocks > 0);
192
193 curs->num_free_blocks--;
194 return(*curs->free_btree_blocks++);
195 }
196
197 /*
198 * set up the dynamically allocated block allocation data in the btree
199 * cursor that depends on the info in the static portion of the cursor.
200 * allocates space from the incore bno/bcnt extent trees and sets up
201 * the first path up the left side of the tree. Also sets up the
202 * cursor pointer to the btree root. called by phase5()
203 * and init_ino_cursor()
204 */
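/*
 * worked example (block numbers are made up): if the cursor needs 7
 * blocks and the three smallest extents in the bcnt tree are [10, 3],
 * [20, 3] and [30, 5], the block array below ends up as
 *
 *	{ 10, 11, 12, 20, 21, 22, 30 }
 *
 * the first two extents are deleted from both incore trees and the
 * partially used third one is put back as [31, 4].
 */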
205 /* ARGSUSED */
206 void
207 setup_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *curs)
208 {
209 int j;
210 unsigned int u;
211 xfs_extlen_t big_extent_len;
212 xfs_agblock_t big_extent_start;
213 extent_tree_node_t *ext_ptr;
214 extent_tree_node_t *bno_ext_ptr;
215 xfs_extlen_t blocks_allocated;
216 xfs_agblock_t *agb_ptr;
217
218 /*
219 * get the number of blocks we need to allocate, then
220 * set up block number array, set the free block pointer
221 * to the first block in the array, and null the array
222 */
223 big_extent_len = curs->num_tot_blocks;
224 blocks_allocated = 0;
225
226 ASSERT(big_extent_len > 0);
227
228 if ((curs->btree_blocks = malloc(sizeof(xfs_agblock_t)
229 * big_extent_len)) == NULL)
230 do_error(_("could not set up btree block array\n"));
231
232 agb_ptr = curs->free_btree_blocks = curs->btree_blocks;
233
234 for (j = 0; j < curs->num_free_blocks; j++, agb_ptr++)
235 *agb_ptr = NULLAGBLOCK;
236
237 /*
238 * grab the smallest extent and use it up, then get the
239 * next smallest. This mimics the init_*_cursor code.
240 */
241 if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
242 do_error(_("error - not enough free space in filesystem\n"));
243
244 agb_ptr = curs->btree_blocks;
245 j = curs->level[0].num_blocks;
246
247 /*
248 * set up the free block array
249 */
250 while (blocks_allocated < big_extent_len) {
251 /*
252 * use up the extent we've got
253 */
254 for (u = 0; u < ext_ptr->ex_blockcount &&
255 blocks_allocated < big_extent_len; u++) {
256 ASSERT(agb_ptr < curs->btree_blocks
257 + curs->num_tot_blocks);
258 *agb_ptr++ = ext_ptr->ex_startblock + u;
259 blocks_allocated++;
260 }
261
262 /*
263 * if we only used part of this last extent, then we
264 * need only to reset the extent in the extent
265 * trees and we're done
266 */
267 if (u < ext_ptr->ex_blockcount) {
268 big_extent_start = ext_ptr->ex_startblock + u;
269 big_extent_len = ext_ptr->ex_blockcount - u;
270
271 ASSERT(big_extent_len > 0);
272
273 bno_ext_ptr = find_bno_extent(agno,
274 ext_ptr->ex_startblock);
275 ASSERT(bno_ext_ptr != NULL);
276 get_bno_extent(agno, bno_ext_ptr);
277 release_extent_tree_node(bno_ext_ptr);
278
279 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
280 ext_ptr->ex_blockcount);
281 release_extent_tree_node(ext_ptr);
282 #ifdef XR_BLD_FREE_TRACE
283 fprintf(stderr, "releasing extent: %u [%u %u]\n",
284 agno, ext_ptr->ex_startblock,
285 ext_ptr->ex_blockcount);
286 fprintf(stderr, "blocks_allocated = %d\n",
287 blocks_allocated);
288 #endif
289
290 add_bno_extent(agno, big_extent_start, big_extent_len);
291 add_bcnt_extent(agno, big_extent_start, big_extent_len);
292
293 return;
294 }
295 /*
296 * delete the used-up extent from both extent trees and
297 * find next biggest extent
298 */
299 #ifdef XR_BLD_FREE_TRACE
300 fprintf(stderr, "releasing extent: %u [%u %u]\n",
301 agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
302 #endif
303 bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock);
304 ASSERT(bno_ext_ptr != NULL);
305 get_bno_extent(agno, bno_ext_ptr);
306 release_extent_tree_node(bno_ext_ptr);
307
308 ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock,
309 ext_ptr->ex_blockcount);
310 ASSERT(ext_ptr != NULL);
311 release_extent_tree_node(ext_ptr);
312
313 ext_ptr = findfirst_bcnt_extent(agno);
314 }
315 #ifdef XR_BLD_FREE_TRACE
316 fprintf(stderr, "blocks_allocated = %d\n",
317 blocks_allocated);
318 #endif
319 }
320
321 void
322 write_cursor(bt_status_t *curs)
323 {
324 int i;
325
326 for (i = 0; i < curs->num_levels; i++) {
327 #if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
328 fprintf(stderr, "writing bt block %u\n", curs->level[i].agbno);
329 #endif
330 if (curs->level[i].prev_buf_p != NULL) {
331 ASSERT(curs->level[i].prev_agbno != NULLAGBLOCK);
332 libxfs_writebuf(curs->level[i].prev_buf_p, 0);
333 }
334 libxfs_writebuf(curs->level[i].buf_p, 0);
335 }
336 }
337
338 void
339 finish_cursor(bt_status_t *curs)
340 {
341 ASSERT(curs->num_free_blocks == 0);
342 free(curs->btree_blocks);
343 }
344
345 /*
346 * no-cursor versions of the XFS equivalents. The address calculators
347 * should be used only for interior btree nodes.
348 * these are adapted from xfs_alloc_btree.h and xfs_btree.h
349 */
350 #define XR_ALLOC_KEY_ADDR(mp, bp, i) \
351 (xfs_alloc_key_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
352 + ((i)-1) * sizeof(xfs_alloc_key_t))
353
354 #define XR_ALLOC_PTR_ADDR(mp, bp, i) \
355 (xfs_alloc_ptr_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
356 + (mp)->m_alloc_mxr[1] * sizeof(xfs_alloc_key_t) \
357 + ((i)-1) * sizeof(xfs_alloc_ptr_t))
358
359 #define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
360 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
361 xfs_alloc, (level) == 0)
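#if 0
	/*
	 * illustrative sketch only (never compiled): this is how the
	 * macros above are used to store entry N of an interior block,
	 * mirroring prop_freespace_cursor() below.  "bt_hdr", "N",
	 * "startblock", "blockcount" and "child_agbno" are hypothetical
	 * names, not part of this file.
	 */
	xfs_alloc_key_t	*bt_key = XR_ALLOC_KEY_ADDR(mp, bt_hdr, N);
	xfs_alloc_ptr_t	*bt_ptr = XR_ALLOC_PTR_ADDR(mp, bt_hdr, N);

	INT_SET(bt_key->ar_startblock, ARCH_CONVERT, startblock);
	INT_SET(bt_key->ar_blockcount, ARCH_CONVERT, blockcount);
	INT_SET(*bt_ptr, ARCH_CONVERT, child_agbno);
#endif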
362
363 /*
364 * this calculates a freespace cursor for an ag.
365 * btree_curs is an in/out. returns the number of
366 * blocks that will show up in the AGFL.
367 */
368
369 int
370 calculate_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
371 xfs_agblock_t *extents, bt_status_t *btree_curs)
372 {
373 xfs_extlen_t blocks_needed; /* a running count */
374 xfs_extlen_t blocks_allocated_pt; /* per tree */
375 xfs_extlen_t blocks_allocated_total; /* for both trees */
376 xfs_agblock_t num_extents;
377 int i;
378 int extents_used;
379 int extra_blocks;
380 bt_stat_level_t *lptr;
381 bt_stat_level_t *p_lptr;
382 extent_tree_node_t *ext_ptr;
383 int level;
384 #ifdef XR_BLD_FREE_TRACE
385 int old_state;
386 int state = XR_E_BAD_STATE;
387 #endif
388 #ifdef XR_BLD_FREE_TRACE
389 fprintf(stderr,
390 "in init_freespace_cursor, agno = %d\n", agno);
391 #endif
392
393 num_extents = *extents;
394 extents_used = 0;
395
396 ASSERT(num_extents != 0);
397
398 lptr = &btree_curs->level[0];
399 btree_curs->init = 1;
400
401 /*
402 * figure out how much space we need for the leaf level
403 * of the tree and set up the cursor for the leaf level
404 * (note that the same code is duplicated further down)
405 */
406 lptr->num_blocks = howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0));
407 lptr->num_recs_pb = num_extents / lptr->num_blocks;
408 lptr->modulo = num_extents % lptr->num_blocks;
409 lptr->num_recs_tot = num_extents;
410 level = 1;
411
412 /*
413 * if we need more levels, set them up. # of records
414 * per level is the # of blocks in the level below it
415 */
416 if (lptr->num_blocks > 1) {
417 for (; btree_curs->level[level - 1].num_blocks > 1
418 && level < XFS_BTREE_MAXLEVELS;
419 level++) {
420 lptr = &btree_curs->level[level];
421 p_lptr = &btree_curs->level[level - 1];
422 lptr->num_blocks = howmany(p_lptr->num_blocks,
423 XR_ALLOC_BLOCK_MAXRECS(mp, level));
424 lptr->modulo = p_lptr->num_blocks
425 % lptr->num_blocks;
426 lptr->num_recs_pb = p_lptr->num_blocks
427 / lptr->num_blocks;
428 lptr->num_recs_tot = p_lptr->num_blocks;
429 }
430 }
431
432 ASSERT(lptr->num_blocks == 1);
433 btree_curs->num_levels = level;
434
435 /*
436 * ok, now we have a hypothetical cursor that
437 * will work for both the bno and bcnt trees.
438 * now figure out if using up blocks to set up the
439 * trees will perturb the shape of the freespace tree.
440 * if so, we've over-allocated. the freespace trees
441 * as they will be *after* accounting for the free space
442 * we've used up will need fewer blocks to represent them
443 * than we've allocated. We can use the AGFL to hold
444 * XFS_AGFL_SIZE (sector/xfs_agfl_t) blocks but that's it.
445 * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
446 * if the number of extra blocks is more than that,
447 * we'll have to be called again.
448 */
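	/*
	 * worked example (sizes are illustrative, assuming 512-byte
	 * sectors and 4-byte AGFL entries): the AGFL would hold
	 * 512 / 4 = 128 block numbers, so each of the two freespace
	 * btrees can shed at most 64 excess blocks into it before this
	 * routine has to be called again.
	 */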
449 for (blocks_needed = 0, i = 0; i < level; i++) {
450 blocks_needed += btree_curs->level[i].num_blocks;
451 }
452
453 /*
454 * record the # of blocks we've allocated
455 */
456 blocks_allocated_pt = blocks_needed;
457 blocks_needed *= 2;
458 blocks_allocated_total = blocks_needed;
459
460 /*
461 * figure out how many free extents will be used up by
462 * our space allocation
463 */
464 if ((ext_ptr = findfirst_bcnt_extent(agno)) == NULL)
465 do_error(_("can't rebuild fs trees -- not enough free space "
466 "on ag %u\n"), agno);
467
468 i = 0;
469 while (ext_ptr != NULL && blocks_needed > 0) {
470 if (ext_ptr->ex_blockcount <= blocks_needed) {
471 blocks_needed -= ext_ptr->ex_blockcount;
472 extents_used++;
473 } else {
474 blocks_needed = 0;
475 }
476
477 ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
478
479 #ifdef XR_BLD_FREE_TRACE
480 if (ext_ptr != NULL) {
481 fprintf(stderr, "got next extent [%u %u]\n",
482 ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
483 } else {
484 fprintf(stderr, "out of extents\n");
485 }
486 #endif
487 }
488 if (blocks_needed > 0)
489 do_error(_("ag %u - not enough free space to build freespace "
490 "btrees\n"), agno);
491
492 ASSERT(num_extents >= extents_used);
493
494 num_extents -= extents_used;
495
496 /*
497 * see if the number of leaf blocks will change as a result
498 * of the number of extents changing
499 */
500 if (howmany(num_extents, XR_ALLOC_BLOCK_MAXRECS(mp, 0))
501 != btree_curs->level[0].num_blocks) {
502 /*
503 * yes -- recalculate the cursor. If the number of
504 * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
505 * we can put those into the AGFL. we don't try
506 * and get things to converge exactly (reach a
507 * state with zero excess blocks) because there
508 * exist pathological cases which will never
509 * converge. first, check for the zero-case.
510 */
511 if (num_extents == 0) {
512 /*
513 * ok, we've used up all the free blocks
514 * trying to lay out the leaf level. go
515 * to a one block (empty) btree and put the
516 * already allocated blocks into the AGFL
517 */
518 if (btree_curs->level[0].num_blocks != 1) {
519 /*
520 * we really needed more blocks because
521 * the old tree had more than one level.
522 * this is bad.
523 */
524 do_warn(_("not enough free blocks left to "
525 "describe all free blocks in AG "
526 "%u\n"), agno);
527 }
528 #ifdef XR_BLD_FREE_TRACE
529 fprintf(stderr,
530 "ag %u -- no free extents, alloc'ed %d\n",
531 agno, blocks_allocated_pt);
532 #endif
533 lptr->num_blocks = 1;
534 lptr->modulo = 0;
535 lptr->num_recs_pb = 0;
536 lptr->num_recs_tot = 0;
537
538 btree_curs->num_levels = 1;
539
540 /*
541 * don't reset the allocation stats, assume
542 * they're all extra blocks
543 * don't forget to return the total block count
544 * not the per-tree block count. these are the
545 * extras that will go into the AGFL. subtract
546 * two for the root blocks.
547 */
548 btree_curs->num_tot_blocks = blocks_allocated_pt;
549 btree_curs->num_free_blocks = blocks_allocated_pt;
550
551 *extents = 0;
552
553 return(blocks_allocated_total - 2);
554 }
555
556 lptr = &btree_curs->level[0];
557 lptr->num_blocks = howmany(num_extents,
558 XR_ALLOC_BLOCK_MAXRECS(mp, 0));
559 lptr->num_recs_pb = num_extents / lptr->num_blocks;
560 lptr->modulo = num_extents % lptr->num_blocks;
561 lptr->num_recs_tot = num_extents;
562 level = 1;
563
564 /*
565 * if we need more levels, set them up
566 */
567 if (lptr->num_blocks > 1) {
568 for (level = 1; btree_curs->level[level-1].num_blocks
569 > 1 && level < XFS_BTREE_MAXLEVELS;
570 level++) {
571 lptr = &btree_curs->level[level];
572 p_lptr = &btree_curs->level[level-1];
573 lptr->num_blocks = howmany(p_lptr->num_blocks,
574 XR_ALLOC_BLOCK_MAXRECS(mp,
575 level));
576 lptr->modulo = p_lptr->num_blocks
577 % lptr->num_blocks;
578 lptr->num_recs_pb = p_lptr->num_blocks
579 / lptr->num_blocks;
580 lptr->num_recs_tot = p_lptr->num_blocks;
581 }
582 }
583 ASSERT(lptr->num_blocks == 1);
584 btree_curs->num_levels = level;
585
586 /*
587 * now figure out the number of excess blocks
588 */
589 for (blocks_needed = 0, i = 0; i < level; i++) {
590 blocks_needed += btree_curs->level[i].num_blocks;
591 }
592 blocks_needed *= 2;
593
594 ASSERT(blocks_allocated_total >= blocks_needed);
595 extra_blocks = blocks_allocated_total - blocks_needed;
596 } else {
597 if (extents_used > 0) {
598 /*
599 * reset the leaf level geometry to account
600 * for consumed extents. we can leave the
601 * rest of the cursor alone since the number
602 * of leaf blocks hasn't changed.
603 */
604 lptr = &btree_curs->level[0];
605
606 lptr->num_recs_pb = num_extents / lptr->num_blocks;
607 lptr->modulo = num_extents % lptr->num_blocks;
608 lptr->num_recs_tot = num_extents;
609 }
610
611 extra_blocks = 0;
612 }
613
614 btree_curs->num_tot_blocks = blocks_allocated_pt;
615 btree_curs->num_free_blocks = blocks_allocated_pt;
616
617 *extents = num_extents;
618
619 return(extra_blocks);
620 }
621
622 void
623 prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
624 bt_status_t *btree_curs, xfs_agblock_t startblock,
625 xfs_extlen_t blockcount, int level, __uint32_t magic)
626 {
627 xfs_alloc_block_t *bt_hdr;
628 xfs_alloc_key_t *bt_key;
629 xfs_alloc_ptr_t *bt_ptr;
630 xfs_agblock_t agbno;
631 bt_stat_level_t *lptr;
632
633 level++;
634
635 if (level >= btree_curs->num_levels)
636 return;
637
638 lptr = &btree_curs->level[level];
639 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
640
641 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) == 0) {
642 /*
643 * only happens once when initializing the
644 * left-hand side of the tree.
645 */
646 prop_freespace_cursor(mp, agno, btree_curs, startblock,
647 blockcount, level, magic);
648 }
649
650 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) ==
651 lptr->num_recs_pb + (lptr->modulo > 0)) {
652 /*
653 * write out current prev block, grab us a new block,
654 * and set the rightsib pointer of current block
655 */
656 #ifdef XR_BLD_FREE_TRACE
657 fprintf(stderr, " %d ", lptr->prev_agbno);
658 #endif
659 if (lptr->prev_agbno != NULLAGBLOCK) {
660 ASSERT(lptr->prev_buf_p != NULL);
661 libxfs_writebuf(lptr->prev_buf_p, 0);
662 }
663 lptr->prev_agbno = lptr->agbno;
664 lptr->prev_buf_p = lptr->buf_p;
665 agbno = get_next_blockaddr(agno, level, btree_curs);
666
667 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, agbno);
668
669 lptr->buf_p = libxfs_getbuf(mp->m_dev,
670 XFS_AGB_TO_DADDR(mp, agno, agbno),
671 XFS_FSB_TO_BB(mp, 1));
672 lptr->agbno = agbno;
673
674 if (lptr->modulo)
675 lptr->modulo--;
676
677 /*
678 * initialize block header
679 */
680 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
681 bzero(bt_hdr, mp->m_sb.sb_blocksize);
682
683 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
684 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, level);
685 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
686 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
687 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
688
689 /*
690 * propagate extent record for first extent in new block up
691 */
692 prop_freespace_cursor(mp, agno, btree_curs, startblock,
693 blockcount, level, magic);
694 }
695 /*
696 * add extent info to current block
697 */
698 INT_MOD(bt_hdr->bb_numrecs, ARCH_CONVERT, +1);
699
700 bt_key = XR_ALLOC_KEY_ADDR(mp, bt_hdr,
701 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
702 bt_ptr = XR_ALLOC_PTR_ADDR(mp, bt_hdr,
703 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
704
705 INT_SET(bt_key->ar_startblock, ARCH_CONVERT, startblock);
706 INT_SET(bt_key->ar_blockcount, ARCH_CONVERT, blockcount);
707 INT_SET(*bt_ptr, ARCH_CONVERT, btree_curs->level[level-1].agbno);
708 }
709
710 /*
711 * rebuilds a freespace tree given a cursor and magic number of type
712 * of tree to build (bno or bcnt). returns the number of free blocks
713 * represented by the tree.
714 */
715 xfs_extlen_t
716 build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
717 bt_status_t *btree_curs, __uint32_t magic)
718 {
719 xfs_agnumber_t i;
720 xfs_agblock_t j;
721 xfs_alloc_block_t *bt_hdr;
722 xfs_alloc_rec_t *bt_rec;
723 int level;
724 xfs_agblock_t agbno;
725 extent_tree_node_t *ext_ptr;
726 bt_stat_level_t *lptr;
727 xfs_extlen_t freeblks;
728
729 #ifdef XR_BLD_FREE_TRACE
730 fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
731 #endif
732 level = btree_curs->num_levels;
733 freeblks = 0;
734
735 ASSERT(level > 0);
736
737 /*
738 * initialize the first block on each btree level
739 */
740 for (i = 0; i < level; i++) {
741 lptr = &btree_curs->level[i];
742
743 agbno = get_next_blockaddr(agno, i, btree_curs);
744 lptr->buf_p = libxfs_getbuf(mp->m_dev,
745 XFS_AGB_TO_DADDR(mp, agno, agbno),
746 XFS_FSB_TO_BB(mp, 1));
747
748 if (i == btree_curs->num_levels - 1)
749 btree_curs->root = agbno;
750
751 lptr->agbno = agbno;
752 lptr->prev_agbno = NULLAGBLOCK;
753 lptr->prev_buf_p = NULL;
754 /*
755 * initialize block header
756 */
757 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
758 bzero(bt_hdr, mp->m_sb.sb_blocksize);
759
760 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
761 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, i);
762 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT,
763 bt_hdr->bb_rightsib = NULLAGBLOCK);
764 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
765 }
766 /*
767 * run along leaf, setting up records. as we have to switch
768 * blocks, call the prop_freespace_cursor routine to set up the new
769 * pointers for the parent. that can recurse up to the root
770 * if required. set the sibling pointers for leaf level here.
771 */
772 if (magic == XFS_ABTB_MAGIC)
773 ext_ptr = findfirst_bno_extent(agno);
774 else
775 ext_ptr = findfirst_bcnt_extent(agno);
776
777 #ifdef XR_BLD_FREE_TRACE
778 fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
779 agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
780 #endif
781
782 lptr = &btree_curs->level[0];
783
784 for (i = 0; i < btree_curs->level[0].num_blocks; i++) {
785 /*
786 * block initialization, lay in block header
787 */
788 bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
789 bzero(bt_hdr, mp->m_sb.sb_blocksize);
790
791 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, magic);
792 INT_ZERO(bt_hdr->bb_level, ARCH_CONVERT);
793 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
794 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
795 INT_SET(bt_hdr->bb_numrecs, ARCH_CONVERT,
796 lptr->num_recs_pb + (lptr->modulo > 0));
797 #ifdef XR_BLD_FREE_TRACE
798 fprintf(stderr, "bft, bb_numrecs = %d\n",
799 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
800 #endif
801
802 if (lptr->modulo > 0)
803 lptr->modulo--;
804
805 /*
806 * initialize values in the path up to the root if
807 * this is a multi-level btree
808 */
809 if (btree_curs->num_levels > 1)
810 prop_freespace_cursor(mp, agno, btree_curs,
811 ext_ptr->ex_startblock,
812 ext_ptr->ex_blockcount,
813 0, magic);
814
815 bt_rec = (xfs_alloc_rec_t *) ((char *) bt_hdr +
816 sizeof(xfs_alloc_block_t));
817 for (j = 0; j < INT_GET(bt_hdr->bb_numrecs,ARCH_CONVERT); j++) {
818 ASSERT(ext_ptr != NULL);
819 INT_SET(bt_rec[j].ar_startblock, ARCH_CONVERT,
820 ext_ptr->ex_startblock);
821 INT_SET(bt_rec[j].ar_blockcount, ARCH_CONVERT,
822 ext_ptr->ex_blockcount);
823 freeblks += ext_ptr->ex_blockcount;
824 if (magic == XFS_ABTB_MAGIC)
825 ext_ptr = findnext_bno_extent(ext_ptr);
826 else
827 ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
828 #if 0
829 #ifdef XR_BLD_FREE_TRACE
830 if (ext_ptr == NULL)
831 fprintf(stderr, "null extent pointer, j = %d\n",
832 j);
833 else
834 fprintf(stderr,
835 "bft, agno = %d, start = %u, count = %u\n",
836 agno, ext_ptr->ex_startblock,
837 ext_ptr->ex_blockcount);
838 #endif
839 #endif
840 }
841
842 if (ext_ptr != NULL) {
843 /*
844 * get next leaf level block
845 */
846 if (lptr->prev_buf_p != NULL) {
847 #ifdef XR_BLD_FREE_TRACE
848 fprintf(stderr, " writing fst agbno %u\n",
849 lptr->prev_agbno);
850 #endif
851 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
852 libxfs_writebuf(lptr->prev_buf_p, 0);
853 }
854 lptr->prev_buf_p = lptr->buf_p;
855 lptr->prev_agbno = lptr->agbno;
856
857 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, lptr->agbno =
858 get_next_blockaddr(agno, 0, btree_curs));
859
860 lptr->buf_p = libxfs_getbuf(mp->m_dev,
861 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
862 XFS_FSB_TO_BB(mp, 1));
863 }
864 }
865
866 return(freeblks);
867 }
868
869 /*
870 * no-cursor versions of the XFS equivalents. The address calculators
871 * should be used only for interior btree nodes.
872 * these are adapted from xfs_ialloc_btree.h and xfs_btree.h
873 */
874 #define XR_INOBT_KEY_ADDR(mp, bp, i) \
875 (xfs_inobt_key_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
876 + ((i)-1) * sizeof(xfs_inobt_key_t))
877
878 #define XR_INOBT_PTR_ADDR(mp, bp, i) \
879 (xfs_inobt_ptr_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
880 + (mp)->m_inobt_mxr[1] * sizeof(xfs_inobt_key_t) \
881 + ((i)-1) * sizeof(xfs_inobt_ptr_t))
882
883 #define XR_INOBT_BLOCK_MAXRECS(mp, level) \
884 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
885 xfs_inobt, (level) == 0)
886
887 /*
888 * we don't have to worry here about how chewing up free extents
889 * may perturb things because inode tree building happens before
890 * freespace tree building.
891 */
892 void
893 init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
894 __uint64_t *num_inos, __uint64_t *num_free_inos)
895 {
896 __uint64_t ninos;
897 __uint64_t nfinos;
898 ino_tree_node_t *ino_rec;
899 int num_recs;
900 int level;
901 bt_stat_level_t *lptr;
902 bt_stat_level_t *p_lptr;
903 xfs_extlen_t blocks_allocated;
904 int i;
905
906 *num_inos = *num_free_inos = 0;
907 ninos = nfinos = 0;
908
909 lptr = &btree_curs->level[0];
910 btree_curs->init = 1;
911
912 if ((ino_rec = findfirst_inode_rec(agno)) == NULL) {
913 /*
914 * easy corner-case -- no inode records
915 */
916 lptr->num_blocks = 1;
917 lptr->modulo = 0;
918 lptr->num_recs_pb = 0;
919 lptr->num_recs_tot = 0;
920
921 btree_curs->num_levels = 1;
922 btree_curs->num_tot_blocks = btree_curs->num_free_blocks = 1;
923
924 setup_cursor(mp, agno, btree_curs);
925
926 return;
927 }
928
929 /*
930 * build up statistics
931 */
932 for (num_recs = 0; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec)) {
933 ninos += XFS_INODES_PER_CHUNK;
934 num_recs++;
935 for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
936 ASSERT(is_inode_confirmed(ino_rec, i));
937 if (is_inode_free(ino_rec, i))
938 nfinos++;
939 }
940 }
941
942 blocks_allocated = lptr->num_blocks = howmany(num_recs,
943 XR_INOBT_BLOCK_MAXRECS(mp, 0));
944
945 lptr->modulo = num_recs % lptr->num_blocks;
946 lptr->num_recs_pb = num_recs / lptr->num_blocks;
947 lptr->num_recs_tot = num_recs;
948 level = 1;
949
950 if (lptr->num_blocks > 1) {
951 for (; btree_curs->level[level-1].num_blocks > 1
952 && level < XFS_BTREE_MAXLEVELS;
953 level++) {
954 lptr = &btree_curs->level[level];
955 p_lptr = &btree_curs->level[level - 1];
956 lptr->num_blocks = howmany(p_lptr->num_blocks,
957 XR_INOBT_BLOCK_MAXRECS(mp, level));
958 lptr->modulo = p_lptr->num_blocks % lptr->num_blocks;
959 lptr->num_recs_pb = p_lptr->num_blocks
960 / lptr->num_blocks;
961 lptr->num_recs_tot = p_lptr->num_blocks;
962
963 blocks_allocated += lptr->num_blocks;
964 }
965 }
966 ASSERT(lptr->num_blocks == 1);
967 btree_curs->num_levels = level;
968
969 btree_curs->num_tot_blocks = btree_curs->num_free_blocks
970 = blocks_allocated;
971
972 setup_cursor(mp, agno, btree_curs);
973
974 *num_inos = ninos;
975 *num_free_inos = nfinos;
976
977 return;
978 }
979
980 void
981 prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
982 xfs_agino_t startino, int level)
983 {
984 xfs_inobt_block_t *bt_hdr;
985 xfs_inobt_key_t *bt_key;
986 xfs_inobt_ptr_t *bt_ptr;
987 xfs_agblock_t agbno;
988 bt_stat_level_t *lptr;
989
990 level++;
991
992 if (level >= btree_curs->num_levels)
993 return;
994
995 lptr = &btree_curs->level[level];
996 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
997
998 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) == 0) {
999 /*
1000 * this only happens once to initialize the
1001 * first path up the left side of the tree
1002 * where the agbno's are already set up
1003 */
1004 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1005 }
1006
1007 if (INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT) ==
1008 lptr->num_recs_pb + (lptr->modulo > 0)) {
1009 /*
1010 * write out current prev block, grab us a new block,
1011 * and set the rightsib pointer of current block
1012 */
1013 #ifdef XR_BLD_INO_TRACE
1014 fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
1015 #endif
1016 if (lptr->prev_agbno != NULLAGBLOCK) {
1017 ASSERT(lptr->prev_buf_p != NULL);
1018 libxfs_writebuf(lptr->prev_buf_p, 0);
1019 }
1020 lptr->prev_agbno = lptr->agbno;
1021 lptr->prev_buf_p = lptr->buf_p;
1022 agbno = get_next_blockaddr(agno, level, btree_curs);
1023
1024 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, agbno);
1025
1026 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1027 XFS_AGB_TO_DADDR(mp, agno, agbno),
1028 XFS_FSB_TO_BB(mp, 1));
1029 lptr->agbno = agbno;
1030
1031 if (lptr->modulo)
1032 lptr->modulo--;
1033
1034 /*
1035 * initialize block header
1036 */
1037 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1038 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1039
1040 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1041 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, level);
1042 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
1043 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
1044 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
1045 /*
1046 * propagate the key for the first inode chunk in the new block up
1047 */
1048 prop_ino_cursor(mp, agno, btree_curs, startino, level);
1049 }
1050 /*
1051 * add inode info to current block
1052 */
1053 INT_MOD(bt_hdr->bb_numrecs, ARCH_CONVERT, +1);
1054
1055 bt_key = XR_INOBT_KEY_ADDR(mp, bt_hdr,
1056 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
1057 bt_ptr = XR_INOBT_PTR_ADDR(mp, bt_hdr,
1058 INT_GET(bt_hdr->bb_numrecs, ARCH_CONVERT));
1059
1060 INT_SET(bt_key->ir_startino, ARCH_CONVERT, startino);
1061 INT_SET(*bt_ptr, ARCH_CONVERT, btree_curs->level[level-1].agbno);
1062 }
1063
1064 void
1065 build_agi(xfs_mount_t *mp, xfs_agnumber_t agno,
1066 bt_status_t *btree_curs, xfs_agino_t first_agino,
1067 xfs_agino_t count, xfs_agino_t freecount)
1068 {
1069 xfs_buf_t *agi_buf;
1070 xfs_agi_t *agi;
1071 int i;
1072
1073 agi_buf = libxfs_getbuf(mp->m_dev,
1074 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
1075 mp->m_sb.sb_sectsize/BBSIZE);
1076 agi = XFS_BUF_TO_AGI(agi_buf);
1077 bzero(agi, mp->m_sb.sb_sectsize);
1078
1079 INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC);
1080 INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION);
1081 INT_SET(agi->agi_seqno, ARCH_CONVERT, agno);
1082 if (agno < mp->m_sb.sb_agcount - 1)
1083 INT_SET(agi->agi_length, ARCH_CONVERT, mp->m_sb.sb_agblocks);
1084 else
1085 INT_SET(agi->agi_length, ARCH_CONVERT, mp->m_sb.sb_dblocks -
1086 (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno);
1087 INT_SET(agi->agi_count, ARCH_CONVERT, count);
1088 INT_SET(agi->agi_root, ARCH_CONVERT, btree_curs->root);
1089 INT_SET(agi->agi_level, ARCH_CONVERT, btree_curs->num_levels);
1090 INT_SET(agi->agi_freecount, ARCH_CONVERT, freecount);
1091 INT_SET(agi->agi_newino, ARCH_CONVERT, first_agino);
1092 INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO);
1093
1094 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
1095 INT_SET(agi->agi_unlinked[i], ARCH_CONVERT, NULLAGINO);
1096 }
1097
1098 libxfs_writebuf(agi_buf, 0);
1099 }
1100
1101 /*
1102 * rebuilds an inode tree given a cursor. We're lazy here and call
1103 * the routine that builds the agi
1104 */
1105 void
1106 build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
1107 bt_status_t *btree_curs)
1108 {
1109 xfs_agnumber_t i;
1110 xfs_agblock_t j;
1111 xfs_agblock_t agbno;
1112 xfs_agino_t first_agino;
1113 xfs_inobt_block_t *bt_hdr;
1114 xfs_inobt_rec_t *bt_rec;
1115 ino_tree_node_t *ino_rec;
1116 bt_stat_level_t *lptr;
1117 xfs_agino_t count = 0;
1118 xfs_agino_t freecount = 0;
1119 int inocnt;
1120 int k;
1121 int level = btree_curs->num_levels;
1122
1123 for (i = 0; i < level; i++) {
1124 lptr = &btree_curs->level[i];
1125
1126 agbno = get_next_blockaddr(agno, i, btree_curs);
1127 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1128 XFS_AGB_TO_DADDR(mp, agno, agbno),
1129 XFS_FSB_TO_BB(mp, 1));
1130
1131 if (i == btree_curs->num_levels - 1)
1132 btree_curs->root = agbno;
1133
1134 lptr->agbno = agbno;
1135 lptr->prev_agbno = NULLAGBLOCK;
1136 lptr->prev_buf_p = NULL;
1137 /*
1138 * initialize block header
1139 */
1140 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1141 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1142
1143 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1144 INT_SET(bt_hdr->bb_level, ARCH_CONVERT, i);
1145 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT,
1146 bt_hdr->bb_rightsib = NULLAGBLOCK);
1147 INT_ZERO(bt_hdr->bb_numrecs, ARCH_CONVERT);
1148 }
1149 /*
1150 * run along leaf, setting up records. as we have to switch
1151 * blocks, call the prop_ino_cursor routine to set up the new
1152 * pointers for the parent. that can recurse up to the root
1153 * if required. set the sibling pointers for leaf level here.
1154 */
1155 ino_rec = findfirst_inode_rec(agno);
1156
1157 if (ino_rec != NULL)
1158 first_agino = ino_rec->ino_startnum;
1159 else
1160 first_agino = NULLAGINO;
1161
1162 lptr = &btree_curs->level[0];
1163
1164 for (i = 0; i < lptr->num_blocks; i++) {
1165 /*
1166 * block initialization, lay in block header
1167 */
1168 bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
1169 bzero(bt_hdr, mp->m_sb.sb_blocksize);
1170
1171 INT_SET(bt_hdr->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
1172 INT_ZERO(bt_hdr->bb_level, ARCH_CONVERT);
1173 INT_SET(bt_hdr->bb_leftsib, ARCH_CONVERT, lptr->prev_agbno);
1174 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
1175 INT_SET(bt_hdr->bb_numrecs, ARCH_CONVERT,
1176 lptr->num_recs_pb + (lptr->modulo > 0));
1177
1178 if (lptr->modulo > 0)
1179 lptr->modulo--;
1180
1181 if (lptr->num_recs_pb > 0)
1182 prop_ino_cursor(mp, agno, btree_curs,
1183 ino_rec->ino_startnum, 0);
1184
1185 bt_rec = (xfs_inobt_rec_t *) ((char *) bt_hdr +
1186 sizeof(xfs_inobt_block_t));
1187 for (j = 0; j < INT_GET(bt_hdr->bb_numrecs,ARCH_CONVERT); j++) {
1188 ASSERT(ino_rec != NULL);
1189 INT_SET(bt_rec[j].ir_startino, ARCH_CONVERT,
1190 ino_rec->ino_startnum);
1191 INT_SET(bt_rec[j].ir_free, ARCH_CONVERT,
1192 ino_rec->ir_free);
1193
1194 inocnt = 0;
1195 for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++) {
1196 ASSERT(is_inode_confirmed(ino_rec, k));
1197 inocnt += is_inode_free(ino_rec, k);
1198 }
1199
1200 INT_SET(bt_rec[j].ir_freecount, ARCH_CONVERT, inocnt);
1201 freecount += inocnt;
1202 count += XFS_INODES_PER_CHUNK;
1203 ino_rec = next_ino_rec(ino_rec);
1204 }
1205
1206 if (ino_rec != NULL) {
1207 /*
1208 * get next leaf level block
1209 */
1210 if (lptr->prev_buf_p != NULL) {
1211 #ifdef XR_BLD_INO_TRACE
1212 fprintf(stderr, "writing inobt agbno %u\n",
1213 lptr->prev_agbno);
1214 #endif
1215 ASSERT(lptr->prev_agbno != NULLAGBLOCK);
1216 libxfs_writebuf(lptr->prev_buf_p, 0);
1217 }
1218 lptr->prev_buf_p = lptr->buf_p;
1219 lptr->prev_agbno = lptr->agbno;
1220
1221 INT_SET(bt_hdr->bb_rightsib, ARCH_CONVERT, lptr->agbno=
1222 get_next_blockaddr(agno, 0, btree_curs));
1223
1224 lptr->buf_p = libxfs_getbuf(mp->m_dev,
1225 XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
1226 XFS_FSB_TO_BB(mp, 1));
1227 }
1228 }
1229
1230 build_agi(mp, agno, btree_curs, first_agino, count, freecount);
1231 }
1232
1233 /*
1234 * build both the agf and the agfl for an agno given both
1235 * btree cursors
1236 */
1237 void
1238 build_agf_agfl(xfs_mount_t *mp,
1239 xfs_agnumber_t agno,
1240 bt_status_t *bno_bt,
1241 bt_status_t *bcnt_bt,
1242 xfs_extlen_t freeblks, /* # free blocks in tree */
1243 int lostblocks) /* # blocks that will be lost */
1244 {
1245 extent_tree_node_t *ext_ptr;
1246 xfs_buf_t *agf_buf, *agfl_buf;
1247 int i;
1248 int j;
1249 xfs_agfl_t *agfl;
1250 xfs_agf_t *agf;
1251
1252 agf_buf = libxfs_getbuf(mp->m_dev,
1253 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1254 mp->m_sb.sb_sectsize/BBSIZE);
1255 agf = XFS_BUF_TO_AGF(agf_buf);
1256 bzero(agf, mp->m_sb.sb_sectsize);
1257
1258 #ifdef XR_BLD_FREE_TRACE
1259 fprintf(stderr, "agf = 0x%x, agf_buf->b_un.b_addr = 0x%x\n",
1260 (__psint_t) agf, (__psint_t) agf_buf->b_un.b_addr);
1261 #endif
1262
1263 /*
1264 * set up fixed part of agf
1265 */
1266 INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC);
1267 INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION);
1268 INT_SET(agf->agf_seqno, ARCH_CONVERT, agno);
1269
1270 if (agno < mp->m_sb.sb_agcount - 1)
1271 INT_SET(agf->agf_length, ARCH_CONVERT, mp->m_sb.sb_agblocks);
1272 else
1273 INT_SET(agf->agf_length, ARCH_CONVERT, mp->m_sb.sb_dblocks -
1274 (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno);
1275
1276 INT_SET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT, bno_bt->root);
1277 INT_SET(agf->agf_levels[XFS_BTNUM_BNO], ARCH_CONVERT,
1278 bno_bt->num_levels);
1279 INT_SET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT, bcnt_bt->root);
1280 INT_SET(agf->agf_levels[XFS_BTNUM_CNT], ARCH_CONVERT,
1281 bcnt_bt->num_levels);
1282 INT_SET(agf->agf_freeblks, ARCH_CONVERT, freeblks);
1283
1284 #ifdef XR_BLD_FREE_TRACE
1285 fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
1286 INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT),
1287 INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT),
1288 XFS_BTNUM_BNO,
1289 XFS_BTNUM_CNT);
1290 #endif
1291
1292 /*
1293 * do we have left-over blocks in the btree cursors that should
1294 * be used to fill the AGFL?
1295 */
1296 if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
1297 /*
1298 * yes - grab the AGFL buffer
1299 */
1300 agfl_buf = libxfs_getbuf(mp->m_dev,
1301 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
1302 mp->m_sb.sb_sectsize/BBSIZE);
1303 agfl = XFS_BUF_TO_AGFL(agfl_buf);
1304 bzero(agfl, mp->m_sb.sb_sectsize);
1305 /*
1306 * ok, now grab as many blocks as we can
1307 */
1308 i = j = 0;
1309 while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
1310 INT_SET(agfl->agfl_bno[i], ARCH_CONVERT,
1311 get_next_blockaddr(agno, 0, bno_bt));
1312 i++;
1313 }
1314
1315 while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
1316 INT_SET(agfl->agfl_bno[i], ARCH_CONVERT,
1317 get_next_blockaddr(agno, 0, bcnt_bt));
1318 i++;
1319 }
1320 /*
1321 * now throw the rest of the blocks away and complain
1322 */
1323 while (bno_bt->num_free_blocks > 0) {
1324 (void) get_next_blockaddr(agno, 0, bno_bt);
1325 j++;
1326 }
1327 while (bcnt_bt->num_free_blocks > 0) {
1328 (void) get_next_blockaddr(agno, 0, bcnt_bt);
1329 j++;
1330 }
1331
1332 if (j > 0) {
1333 if (j == lostblocks)
1334 do_warn(_("lost %d blocks in ag %u\n"),
1335 j, agno);
1336 else
1337 do_warn(_("thought we were going to lose %d "
1338 "blocks in ag %u, actually lost "
1339 "%d\n"),
1340 lostblocks, j, agno);
1341 }
1342
1343 INT_ZERO(agf->agf_flfirst, ARCH_CONVERT);
1344 INT_SET(agf->agf_fllast, ARCH_CONVERT, i - 1);
1345 INT_SET(agf->agf_flcount, ARCH_CONVERT, i);
1346
1347 #ifdef XR_BLD_FREE_TRACE
1348 fprintf(stderr, "writing agfl for ag %u\n", agno);
1349 #endif
1350
1351 libxfs_writebuf(agfl_buf, 0);
1352 } else {
1353 INT_ZERO(agf->agf_flfirst, ARCH_CONVERT);
1354 INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1);
1355 INT_ZERO(agf->agf_flcount, ARCH_CONVERT);
1356 }
1357
1358 ext_ptr = findbiggest_bcnt_extent(agno);
1359 INT_SET(agf->agf_longest, ARCH_CONVERT,
1360 (ext_ptr != NULL) ? ext_ptr->ex_blockcount : 0);
1361
1362 ASSERT(INT_GET(agf->agf_roots[XFS_BTNUM_BNOi], ARCH_CONVERT) !=
1363 INT_GET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT));
1364
1365 libxfs_writebuf(agf_buf, 0);
1366
1367 #ifdef XR_BLD_FREE_TRACE
1368 fprintf(stderr, "wrote agf for ag %u, error = %d\n", agno, error);
1369 #endif
1370 }
1371
1372 /*
1373 * update the superblock counters, sync the sb version numbers and
1374 * feature bits to the filesystem, and sync up the on-disk superblock
1375 * to match the incore superblock.
1376 */
1377 void
1378 sync_sb(xfs_mount_t *mp)
1379 {
1380 xfs_sb_t *sbp;
1381 xfs_buf_t *bp;
1382
1383 bp = libxfs_getsb(mp, 0);
1384 if (!bp)
1385 do_error(_("couldn't get superblock\n"));
1386
1387 sbp = XFS_BUF_TO_SBP(bp);
1388
1389 mp->m_sb.sb_icount = sb_icount;
1390 mp->m_sb.sb_ifree = sb_ifree;
1391 mp->m_sb.sb_fdblocks = sb_fdblocks;
1392 mp->m_sb.sb_frextents = sb_frextents;
1393
1394 update_sb_version(mp);
1395
1396 *sbp = mp->m_sb;
1397 libxfs_xlate_sb(XFS_BUF_PTR(bp), sbp, -1, ARCH_CONVERT,
1398 XFS_SB_ALL_BITS);
1399 libxfs_writebuf(bp, 0);
1400 }
1401
1402 /*
1403 * make sure the root and realtime inodes show up allocated
1404 * even if they've been freed. they get reinitialized in phase6.
1405 */
1406 void
1407 keep_fsinos(xfs_mount_t *mp)
1408 {
1409 ino_tree_node_t *irec;
1410 int i;
1411
1412 irec = find_inode_rec(XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
1413 XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
1414
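	/*
	 * this assumes the root inode sits at the start of its chunk and
	 * that the realtime bitmap and summary inodes immediately follow
	 * it, which is how mkfs lays them out.
	 */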
1415 for (i = 0; i < 3; i++)
1416 set_inode_used(irec, i);
1417 }
1418
1419 void
1420 phase5(xfs_mount_t *mp)
1421 {
1422 __uint64_t num_inos;
1423 __uint64_t num_free_inos;
1424 bt_status_t bno_btree_curs;
1425 bt_status_t bcnt_btree_curs;
1426 bt_status_t ino_btree_curs;
1427 xfs_agnumber_t agno;
1428 int extra_blocks = 0;
1429 uint num_freeblocks;
1430 xfs_extlen_t freeblks1;
1431 xfs_extlen_t freeblks2;
1432 xfs_agblock_t num_extents;
1433 extern int count_bno_extents(xfs_agnumber_t);
1434 extern int count_bno_extents_blocks(xfs_agnumber_t, uint *);
1435 #ifdef XR_BLD_FREE_TRACE
1436 extern int count_bcnt_extents(xfs_agnumber_t);
1437 #endif
1438
1439 do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
1440
1441 #ifdef XR_BLD_FREE_TRACE
1442 fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
1443 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0),
1444 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0)
1445 );
1446 fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1447 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1),
1448 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1)
1449 );
1450 fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
1451 XR_INOBT_BLOCK_MAXRECS(mp, 0));
1452 fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
1453 XR_INOBT_BLOCK_MAXRECS(mp, 1));
1454 fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
1455 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0),
1456 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0));
1457 fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1458 XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1),
1459 XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1));
1460 #endif
1461
1462 /*
1463 * make sure the root and realtime inodes show up allocated
1464 */
1465 keep_fsinos(mp);
1466
1467 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
1468 /*
1469 * build up incore bno and bcnt extent btrees
1470 */
1471 num_extents = mk_incore_fstree(mp, agno);
1472
1473 #ifdef XR_BLD_FREE_TRACE
1474 fprintf(stderr, "# of bno extents is %d\n",
1475 count_bno_extents(agno));
1476 #endif
1477
1478 if (num_extents == 0) {
1479 /*
1480 * XXX - what we probably should do here is pick an
1481 * inode for a regular file in the allocation group
1482 * that has space allocated and shoot it by traversing
1483 * the bmap list and putting all its extents on the
1484 * incore freespace trees, clearing the inode,
1485 * and clearing the in-use bit in the incore inode
1486 * tree. Then try mk_incore_fstree() again.
1487 */
1488 do_error(_("unable to rebuild AG %u. "
1489 "Not enough free space in on-disk AG.\n"),
1490 agno);
1491 }
1492
1493 /*
1494 * done with the AG bitmap, toss it...
1495 */
1496 teardown_ag_bmap(mp, agno);
1497
1498 /*
1499 * ok, now set up the btree cursors for the
1500 * on-disk btrees (includes pre-allocating all
1501 * required blocks for the trees themselves)
1502 */
1503 init_ino_cursor(mp, agno, &ino_btree_curs,
1504 &num_inos, &num_free_inos);
1505
1506 sb_icount += num_inos;
1507 sb_ifree += num_free_inos;
1508
1509 num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
1510 /*
1511 * lose two blocks per AG -- the space tree roots
1512 * are counted as allocated since the space trees
1513 * always have roots
1514 */
1515 sb_fdblocks += num_freeblocks - 2;
1516
1517 if (num_extents == 0) {
1518 /*
1519 * XXX - what we probably should do here is pick an
1520 * inode for a regular file in the allocation group
1521 * that has space allocated and shoot it by traversing
1522 * the bmap list and putting all its extents on the
1523 * incore freespace trees, clearing the inode,
1524 * and clearing the in-use bit in the incore inode
1525 * tree. Then try mk_incore_fstree() again.
1526 */
1527 do_error(
1528 _("unable to rebuild AG %u. No free space.\n"), agno);
1529 }
1530
1531 #ifdef XR_BLD_FREE_TRACE
1532 fprintf(stderr, "# of bno extents is %d\n", num_extents);
1533 #endif
1534
1535 /*
1536 * track blocks that we might really lose
1537 */
1538 extra_blocks = calculate_freespace_cursor(mp, agno,
1539 &num_extents, &bno_btree_curs);
1540
1541 /*
1542 * freespace btrees live in the "free space" but
1543 * the filesystem treats AGFL blocks as allocated
1544 * since they aren't described by the freespace trees
1545 */
1546
1547 /*
1548 * see if we can fit all the extra blocks into the AGFL
1549 */
1550 extra_blocks = (extra_blocks - XFS_AGFL_SIZE(mp) > 0)
1551 ? extra_blocks - XFS_AGFL_SIZE(mp)
1552 : 0;
1553
1554 if (extra_blocks > 0) {
1555 do_warn(_("lost %d blocks in agno %d, sorry.\n"),
1556 extra_blocks, agno);
1557 sb_fdblocks -= extra_blocks;
1558 }
1559
1560 bcnt_btree_curs = bno_btree_curs;
1561
1562 setup_cursor(mp, agno, &bno_btree_curs);
1563 setup_cursor(mp, agno, &bcnt_btree_curs);
1564
1565 #ifdef XR_BLD_FREE_TRACE
1566 fprintf(stderr, "# of bno extents is %d\n",
1567 count_bno_extents(agno));
1568 fprintf(stderr, "# of bcnt extents is %d\n",
1569 count_bcnt_extents(agno));
1570 #endif
1571 /*
1572 * now rebuild the freespace trees
1573 */
1574 freeblks1 = build_freespace_tree(mp, agno, &bno_btree_curs,
1575 XFS_ABTB_MAGIC);
1576 #ifdef XR_BLD_FREE_TRACE
1577 fprintf(stderr, "# of free blocks == %d\n", freeblks1);
1578 #endif
1579 write_cursor(&bno_btree_curs);
1580
1581 freeblks2 = build_freespace_tree(mp, agno, &bcnt_btree_curs,
1582 XFS_ABTC_MAGIC);
1583 write_cursor(&bcnt_btree_curs);
1584
1585 ASSERT(freeblks1 == freeblks2);
1586
1587 /*
1588 * set up agf and agfl
1589 */
1590 build_agf_agfl(mp, agno, &bno_btree_curs,
1591 &bcnt_btree_curs, freeblks1, extra_blocks);
1592 /*
1593 * build inode allocation tree. this also builds the agi
1594 */
1595 build_ino_tree(mp, agno, &ino_btree_curs);
1596 write_cursor(&ino_btree_curs);
1597 /*
1598 * tear down cursors
1599 */
1600 finish_cursor(&bno_btree_curs);
1601 finish_cursor(&ino_btree_curs);
1602 finish_cursor(&bcnt_btree_curs);
1603 /*
1604 * release the incore per-AG bno/bcnt trees so
1605 * the extent nodes can be recycled
1606 */
1607 release_agbno_extent_tree(agno);
1608 release_agbcnt_extent_tree(agno);
1609 }
1610
1611 if (mp->m_sb.sb_rblocks) {
1612 do_log(
1613 _(" - generate realtime summary info and bitmap...\n"));
1614 rtinit(mp);
1615 generate_rtinfo(mp, btmcompute, sumcompute);
1616 teardown_rt_bmap(mp);
1617 }
1618
1619 do_log(_(" - reset superblock...\n"));
1620
1621 /*
1622 * sync superblock counter and set version bits correctly
1623 */
1624 sync_sb(mp);
1625
1626 bad_ino_btree = 0;
1627 }