2 * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
28 * For further information regarding this notice, see:
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
39 #include "err_protos.h"
45 * we maintain the current slice (path from root to leaf)
46 * of the btree incore. when we need a new block, we ask
47 * the block allocator for the address of a block on that
48 * level, map the block in, and set up the appropriate
49 * pointers (child, sibling, etc.) and keys that should
50 * point to the new block.
52 typedef struct bt_stat_level
{
54 * set in setup_cursor routine and maintained in the tree-building
57 xfs_buf_t
*buf_p
; /* 2 buffer pointers to ... */
58 xfs_buf_t
*prev_buf_p
;
59 xfs_agblock_t agbno
; /* current block being filled */
60 xfs_agblock_t prev_agbno
; /* previous block */
62 * set in calculate/init cursor routines for each btree level
64 int num_recs_tot
; /* # tree recs in level */
65 int num_blocks
; /* # tree blocks in level */
66 int num_recs_pb
; /* num_recs_tot / num_blocks */
67 int modulo
; /* num_recs_tot % num_blocks */
70 typedef struct bt_status
{
71 int init
; /* cursor set up once? */
72 int num_levels
; /* # of levels in btree */
73 xfs_extlen_t num_tot_blocks
; /* # blocks alloc'ed for tree */
74 xfs_extlen_t num_free_blocks
;/* # blocks currently unused */
76 xfs_agblock_t root
; /* root block */
78 * list of blocks to be used to set up this tree
79 * and pointer to the first unused block on the list
81 xfs_agblock_t
*btree_blocks
; /* block list */
82 xfs_agblock_t
*free_btree_blocks
; /* first unused block */
84 * per-level status info
86 bt_stat_level_t level
[XFS_BTREE_MAXLEVELS
];
91 mk_incore_fstree(xfs_mount_t
*mp
, xfs_agnumber_t agno
)
95 xfs_agblock_t extent_start
;
96 xfs_extlen_t extent_len
;
100 #ifdef XR_BLD_FREE_TRACE
102 int state
= XR_E_BAD_STATE
;
106 * scan the bitmap for the ag looking for contiguous
107 * extents of free blocks. At this point, we know
108 * that blocks in the bitmap are either set to an
109 * "in use" state or set to unknown (0) since the
110 * bmaps were bzero'ed in phase 4 and only blocks
111 * being used by inodes, inode bmaps, ag headers,
112 * and the files themselves were put into the bitmap.
115 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
117 extent_start
= extent_len
= 0;
119 num_extents
= free_blocks
= 0;
121 if (agno
< mp
->m_sb
.sb_agcount
- 1)
122 ag_end
= mp
->m_sb
.sb_agblocks
;
124 ag_end
= mp
->m_sb
.sb_dblocks
-
125 mp
->m_sb
.sb_agblocks
* (mp
->m_sb
.sb_agcount
- 1);
128 * ok, now find the number of extents, keep track of the
131 for (agbno
= 0; agbno
< ag_end
; agbno
++) {
134 state
= get_agbno_state(mp
, agno
, agbno
);
135 if (state
!= old_state
) {
136 fprintf(stderr
, "agbno %u - new state is %d\n",
140 if (get_agbno_state(mp
, agno
, agbno
) < XR_E_INUSE
) {
142 if (in_extent
== 0) {
144 * found the start of a free extent
148 extent_start
= agbno
;
156 * free extent ends here, add extent to the
157 * 2 incore extent (avl-to-be-B+) trees
160 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
161 fprintf(stderr
, "adding extent %u [%u %u]\n",
162 agno
, extent_start
, extent_len
);
164 add_bno_extent(agno
, extent_start
, extent_len
);
165 add_bcnt_extent(agno
, extent_start
, extent_len
);
171 * free extent ends here
174 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
175 fprintf(stderr
, "adding extent %u [%u %u]\n",
176 agno
, extent_start
, extent_len
);
178 add_bno_extent(agno
, extent_start
, extent_len
);
179 add_bcnt_extent(agno
, extent_start
, extent_len
);
187 get_next_blockaddr(xfs_agnumber_t agno
, int level
, bt_status_t
*curs
)
189 ASSERT(curs
->free_btree_blocks
< curs
->btree_blocks
+
190 curs
->num_tot_blocks
);
191 ASSERT(curs
->num_free_blocks
> 0);
193 curs
->num_free_blocks
--;
194 return(*curs
->free_btree_blocks
++);
198 * set up the dynamically allocated block allocation data in the btree
199 * cursor that depends on the info in the static portion of the cursor.
200 * allocates space from the incore bno/bcnt extent trees and sets up
201 * the first path up the left side of the tree. Also sets up the
202 * cursor pointer to the btree root. called by init_freespace_cursor()
203 * and init_ino_cursor()
207 setup_cursor(xfs_mount_t
*mp
, xfs_agnumber_t agno
, bt_status_t
*curs
)
211 xfs_extlen_t big_extent_len
;
212 xfs_agblock_t big_extent_start
;
213 extent_tree_node_t
*ext_ptr
;
214 extent_tree_node_t
*bno_ext_ptr
;
215 xfs_extlen_t blocks_allocated
;
216 xfs_agblock_t
*agb_ptr
;
219 * get the number of blocks we need to allocate, then
220 * set up block number array, set the free block pointer
221 * to the first block in the array, and null the array
223 big_extent_len
= curs
->num_tot_blocks
;
224 blocks_allocated
= 0;
226 ASSERT(big_extent_len
> 0);
228 if ((curs
->btree_blocks
= malloc(sizeof(xfs_agblock_t
*)
229 * big_extent_len
)) == NULL
) {
230 do_error("could not set up btree block array\n");
234 agb_ptr
= curs
->free_btree_blocks
= curs
->btree_blocks
;
236 for (j
= 0; j
< curs
->num_free_blocks
; j
++, agb_ptr
++)
237 *agb_ptr
= NULLAGBLOCK
;
240 * grab the smallest extent and use it up, then get the
241 * next smallest. This mimics the init_*_cursor code.
243 if ((ext_ptr
= findfirst_bcnt_extent(agno
)) == NULL
) {
244 do_error("error - not enough free space in filesystem\n");
248 agb_ptr
= curs
->btree_blocks
;
249 j
= curs
->level
[0].num_blocks
;
252 * set up the free block array
254 while (blocks_allocated
< big_extent_len
) {
256 * use up the extent we've got
258 for (u
= 0; u
< ext_ptr
->ex_blockcount
&&
259 blocks_allocated
< big_extent_len
; u
++) {
260 ASSERT(agb_ptr
< curs
->btree_blocks
261 + curs
->num_tot_blocks
);
262 *agb_ptr
++ = ext_ptr
->ex_startblock
+ u
;
267 * if we only used part of this last extent, then we
268 * need only to reset the extent in the extent
269 * trees and we're done
271 if (u
< ext_ptr
->ex_blockcount
) {
272 big_extent_start
= ext_ptr
->ex_startblock
+ u
;
273 big_extent_len
= ext_ptr
->ex_blockcount
- u
;
275 ASSERT(big_extent_len
> 0);
277 bno_ext_ptr
= find_bno_extent(agno
,
278 ext_ptr
->ex_startblock
);
279 ASSERT(bno_ext_ptr
!= NULL
);
280 get_bno_extent(agno
, bno_ext_ptr
);
281 release_extent_tree_node(bno_ext_ptr
);
283 ext_ptr
= get_bcnt_extent(agno
, ext_ptr
->ex_startblock
,
284 ext_ptr
->ex_blockcount
);
285 release_extent_tree_node(ext_ptr
);
286 #ifdef XR_BLD_FREE_TRACE
287 fprintf(stderr
, "releasing extent: %u [%u %u]\n",
288 agno
, ext_ptr
->ex_startblock
,
289 ext_ptr
->ex_blockcount
);
290 fprintf(stderr
, "blocks_allocated = %d\n",
294 add_bno_extent(agno
, big_extent_start
, big_extent_len
);
295 add_bcnt_extent(agno
, big_extent_start
, big_extent_len
);
300 * delete the used-up extent from both extent trees and
301 * find next biggest extent
303 #ifdef XR_BLD_FREE_TRACE
304 fprintf(stderr
, "releasing extent: %u [%u %u]\n",
305 agno
, ext_ptr
->ex_startblock
, ext_ptr
->ex_blockcount
);
307 bno_ext_ptr
= find_bno_extent(agno
, ext_ptr
->ex_startblock
);
308 ASSERT(bno_ext_ptr
!= NULL
);
309 get_bno_extent(agno
, bno_ext_ptr
);
310 release_extent_tree_node(bno_ext_ptr
);
312 ext_ptr
= get_bcnt_extent(agno
, ext_ptr
->ex_startblock
,
313 ext_ptr
->ex_blockcount
);
314 ASSERT(ext_ptr
!= NULL
);
315 release_extent_tree_node(ext_ptr
);
317 ext_ptr
= findfirst_bcnt_extent(agno
);
319 #ifdef XR_BLD_FREE_TRACE
320 fprintf(stderr
, "blocks_allocated = %d\n",
326 write_cursor(bt_status_t
*curs
)
330 for (i
= 0; i
< curs
->num_levels
; i
++) {
331 #if defined(XR_BLD_FREE_TRACE) || defined(XR_BLD_INO_TRACE)
332 fprintf(stderr
, "writing bt block %u\n", curs
->level
[i
].agbno
);
334 if (curs
->level
[i
].prev_buf_p
!= NULL
) {
335 ASSERT(curs
->level
[i
].prev_agbno
!= NULLAGBLOCK
);
336 libxfs_writebuf(curs
->level
[i
].prev_buf_p
, 0);
338 libxfs_writebuf(curs
->level
[i
].buf_p
, 0);
343 finish_cursor(bt_status_t
*curs
)
345 ASSERT(curs
->num_free_blocks
== 0);
346 free(curs
->btree_blocks
);
350 * no-cursor versions of the XFS equivalents. The address calculators
351 * should be used only for interior btree nodes.
352 * these are adapted from xfs_alloc_btree.h and xfs_tree.h
354 #define XR_ALLOC_KEY_ADDR(mp, bp, i) \
355 (xfs_alloc_key_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
356 + ((i)-1) * sizeof(xfs_alloc_key_t))
358 #define XR_ALLOC_PTR_ADDR(mp, bp, i) \
359 (xfs_alloc_ptr_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
360 + (mp)->m_alloc_mxr[1] * sizeof(xfs_alloc_key_t) \
361 + ((i)-1) * sizeof(xfs_alloc_ptr_t))
363 #define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
364 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
365 xfs_alloc, (level) == 0)
368 * this calculates a freespace cursor for an ag.
369 * btree_curs is an in/out. returns the number of
370 * blocks that will show up in the AGFL.
374 calculate_freespace_cursor(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
375 xfs_agblock_t
*extents
, bt_status_t
*btree_curs
)
377 xfs_extlen_t blocks_needed
; /* a running count */
378 xfs_extlen_t blocks_allocated_pt
; /* per tree */
379 xfs_extlen_t blocks_allocated_total
; /* for both trees */
380 xfs_agblock_t num_extents
;
384 bt_stat_level_t
*lptr
;
385 bt_stat_level_t
*p_lptr
;
386 extent_tree_node_t
*ext_ptr
;
388 #ifdef XR_BLD_FREE_TRACE
390 int state
= XR_E_BAD_STATE
;
392 #ifdef XR_BLD_FREE_TRACE
394 "in init_freespace_cursor, agno = %d\n", agno
);
397 num_extents
= *extents
;
400 ASSERT(num_extents
!= 0);
402 lptr
= &btree_curs
->level
[0];
403 btree_curs
->init
= 1;
406 * figure out how much space we need for the leaf level
407 * of the tree and set up the cursor for the leaf level
408 * (note that the same code is duplicated further down)
410 lptr
->num_blocks
= howmany(num_extents
, XR_ALLOC_BLOCK_MAXRECS(mp
, 0));
411 lptr
->num_recs_pb
= num_extents
/ lptr
->num_blocks
;
412 lptr
->modulo
= num_extents
% lptr
->num_blocks
;
413 lptr
->num_recs_tot
= num_extents
;
417 * if we need more levels, set them up. # of records
418 * per level is the # of blocks in the level below it
420 if (lptr
->num_blocks
> 1) {
421 for (; btree_curs
->level
[level
- 1].num_blocks
> 1
422 && level
< XFS_BTREE_MAXLEVELS
;
424 lptr
= &btree_curs
->level
[level
];
425 p_lptr
= &btree_curs
->level
[level
- 1];
426 lptr
->num_blocks
= howmany(p_lptr
->num_blocks
,
427 XR_ALLOC_BLOCK_MAXRECS(mp
, level
));
428 lptr
->modulo
= p_lptr
->num_blocks
430 lptr
->num_recs_pb
= p_lptr
->num_blocks
432 lptr
->num_recs_tot
= p_lptr
->num_blocks
;
436 ASSERT(lptr
->num_blocks
== 1);
437 btree_curs
->num_levels
= level
;
440 * ok, now we have a hypothetical cursor that
441 * will work for both the bno and bcnt trees.
442 * now figure out if using up blocks to set up the
443 * trees will perturb the shape of the freespace tree.
444 * if so, we've over-allocated. the freespace trees
445 * as they will be *after* accounting for the free space
446 * we've used up will need fewer blocks to represent
447 * than we've allocated. We can use the AGFL to hold
448 * XFS_AGFL_SIZE (128) blocks but that's it.
449 * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
450 * if the number of extra blocks is more than that,
451 * we'll have to be called again.
453 for (blocks_needed
= 0, i
= 0; i
< level
; i
++) {
454 blocks_needed
+= btree_curs
->level
[i
].num_blocks
;
458 * record the # of blocks we've allocated
460 blocks_allocated_pt
= blocks_needed
;
462 blocks_allocated_total
= blocks_needed
;
465 * figure out how many free extents will be used up by
466 * our space allocation
468 if ((ext_ptr
= findfirst_bcnt_extent(agno
)) == NULL
) {
469 do_error("can't rebuild fs trees -- not enough free space "
475 while (ext_ptr
!= NULL
&& blocks_needed
> 0) {
476 if (ext_ptr
->ex_blockcount
<= blocks_needed
) {
477 blocks_needed
-= ext_ptr
->ex_blockcount
;
483 ext_ptr
= findnext_bcnt_extent(agno
, ext_ptr
);
485 #ifdef XR_BLD_FREE_TRACE
486 if (ext_ptr
!= NULL
) {
487 fprintf(stderr
, "got next extent [%u %u]\n",
488 ext_ptr
->ex_startblock
, ext_ptr
->ex_blockcount
);
490 fprintf(stderr
, "out of extents\n");
494 if (blocks_needed
> 0) {
495 do_error("ag %u - not enough free space to build freespace "
500 ASSERT(num_extents
>= extents_used
);
502 num_extents
-= extents_used
;
505 * see if the number of leaf blocks will change as a result
506 * of the number of extents changing
508 if (howmany(num_extents
, XR_ALLOC_BLOCK_MAXRECS(mp
, 0))
509 != btree_curs
->level
[0].num_blocks
) {
511 * yes -- recalculate the cursor. If the number of
512 * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
513 * we can put those into the AGFL. we don't try
514 * and get things to converge exactly (reach a
515 * state with zero excess blocks) because there
516 * exist pathological cases which will never
517 * converge. first, check for the zero-case.
519 if (num_extents
== 0) {
521 * ok, we've used up all the free blocks
522 * trying to lay out the leaf level. go
523 * to a one block (empty) btree and put the
524 * already allocated blocks into the AGFL
526 if (btree_curs
->level
[0].num_blocks
!= 1) {
528 * we really needed more blocks because
529 * the old tree had more than one level.
532 do_warn("not enough free blocks left to "
533 "describe all free blocks in AG %u\n",
536 #ifdef XR_BLD_FREE_TRACE
538 "ag %u -- no free extents, alloc'ed %d\n",
539 agno
, blocks_allocated_pt
);
541 lptr
->num_blocks
= 1;
543 lptr
->num_recs_pb
= 0;
544 lptr
->num_recs_tot
= 0;
546 btree_curs
->num_levels
= 1;
549 * don't reset the allocation stats, assume
550 * they're all extra blocks
551 * don't forget to return the total block count
552 * not the per-tree block count. these are the
553 * extras that will go into the AGFL. subtract
554 * two for the root blocks.
556 btree_curs
->num_tot_blocks
= blocks_allocated_pt
;
557 btree_curs
->num_free_blocks
= blocks_allocated_pt
;
561 return(blocks_allocated_total
- 2);
564 lptr
= &btree_curs
->level
[0];
565 lptr
->num_blocks
= howmany(num_extents
,
566 XR_ALLOC_BLOCK_MAXRECS(mp
, 0));
567 lptr
->num_recs_pb
= num_extents
/ lptr
->num_blocks
;
568 lptr
->modulo
= num_extents
% lptr
->num_blocks
;
569 lptr
->num_recs_tot
= num_extents
;
573 * if we need more levels, set them up
575 if (lptr
->num_blocks
> 1) {
576 for (level
= 1; btree_curs
->level
[level
-1].num_blocks
577 > 1 && level
< XFS_BTREE_MAXLEVELS
;
579 lptr
= &btree_curs
->level
[level
];
580 p_lptr
= &btree_curs
->level
[level
-1];
581 lptr
->num_blocks
= howmany(p_lptr
->num_blocks
,
582 XR_ALLOC_BLOCK_MAXRECS(mp
,
584 lptr
->modulo
= p_lptr
->num_blocks
586 lptr
->num_recs_pb
= p_lptr
->num_blocks
588 lptr
->num_recs_tot
= p_lptr
->num_blocks
;
591 ASSERT(lptr
->num_blocks
== 1);
592 btree_curs
->num_levels
= level
;
595 * now figure out the number of excess blocks
597 for (blocks_needed
= 0, i
= 0; i
< level
; i
++) {
598 blocks_needed
+= btree_curs
->level
[i
].num_blocks
;
602 ASSERT(blocks_allocated_total
>= blocks_needed
);
603 extra_blocks
= blocks_allocated_total
- blocks_needed
;
605 if (extents_used
> 0) {
607 * reset the leaf level geometry to account
608 * for consumed extents. we can leave the
609 * rest of the cursor alone since the number
610 * of leaf blocks hasn't changed.
612 lptr
= &btree_curs
->level
[0];
614 lptr
->num_recs_pb
= num_extents
/ lptr
->num_blocks
;
615 lptr
->modulo
= num_extents
% lptr
->num_blocks
;
616 lptr
->num_recs_tot
= num_extents
;
622 btree_curs
->num_tot_blocks
= blocks_allocated_pt
;
623 btree_curs
->num_free_blocks
= blocks_allocated_pt
;
625 *extents
= num_extents
;
627 return(extra_blocks
);
631 prop_freespace_cursor(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
632 bt_status_t
*btree_curs
, xfs_agblock_t startblock
,
633 xfs_extlen_t blockcount
, int level
, __uint32_t magic
)
635 xfs_alloc_block_t
*bt_hdr
;
636 xfs_alloc_key_t
*bt_key
;
637 xfs_alloc_ptr_t
*bt_ptr
;
639 bt_stat_level_t
*lptr
;
643 if (level
>= btree_curs
->num_levels
)
646 lptr
= &btree_curs
->level
[level
];
647 bt_hdr
= XFS_BUF_TO_ALLOC_BLOCK(lptr
->buf_p
);
649 if (INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
) == 0) {
651 * only happens once when initializing the
652 * left-hand side of the tree.
654 prop_freespace_cursor(mp
, agno
, btree_curs
, startblock
,
655 blockcount
, level
, magic
);
658 if (INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
) ==
659 lptr
->num_recs_pb
+ (lptr
->modulo
> 0)) {
661 * write out current prev block, grab us a new block,
662 * and set the rightsib pointer of current block
664 #ifdef XR_BLD_FREE_TRACE
665 fprintf(stderr
, " %d ", lptr
->prev_agbno
);
667 if (lptr
->prev_agbno
!= NULLAGBLOCK
) {
668 ASSERT(lptr
->prev_buf_p
!= NULL
);
669 libxfs_writebuf(lptr
->prev_buf_p
, 0);
671 lptr
->prev_agbno
= lptr
->agbno
;;
672 lptr
->prev_buf_p
= lptr
->buf_p
;
673 agbno
= get_next_blockaddr(agno
, level
, btree_curs
);
675 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, agbno
);
677 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
678 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
679 XFS_FSB_TO_BB(mp
, 1));
686 * initialize block header
688 bt_hdr
= XFS_BUF_TO_ALLOC_BLOCK(lptr
->buf_p
);
689 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
691 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, magic
);
692 INT_SET(bt_hdr
->bb_level
, ARCH_CONVERT
, level
);
693 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
, lptr
->prev_agbno
);
694 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, NULLAGBLOCK
);
695 INT_ZERO(bt_hdr
->bb_numrecs
, ARCH_CONVERT
);
698 * propagate extent record for first extent in new block up
700 prop_freespace_cursor(mp
, agno
, btree_curs
, startblock
,
701 blockcount
, level
, magic
);
704 * add extent info to current block
706 INT_MOD(bt_hdr
->bb_numrecs
, ARCH_CONVERT
, +1);
708 bt_key
= XR_ALLOC_KEY_ADDR(mp
, bt_hdr
,
709 INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
));
710 bt_ptr
= XR_ALLOC_PTR_ADDR(mp
, bt_hdr
,
711 INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
));
713 INT_SET(bt_key
->ar_startblock
, ARCH_CONVERT
, startblock
);
714 INT_SET(bt_key
->ar_blockcount
, ARCH_CONVERT
, blockcount
);
715 INT_SET(*bt_ptr
, ARCH_CONVERT
, btree_curs
->level
[level
-1].agbno
);
719 * rebuilds a freespace tree given a cursor and magic number of type
720 * of tree to build (bno or bcnt). returns the number of free blocks
721 * represented by the tree.
724 build_freespace_tree(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
725 bt_status_t
*btree_curs
, __uint32_t magic
)
729 xfs_alloc_block_t
*bt_hdr
;
730 xfs_alloc_rec_t
*bt_rec
;
733 extent_tree_node_t
*ext_ptr
;
734 bt_stat_level_t
*lptr
;
735 xfs_extlen_t freeblks
;
737 #ifdef XR_BLD_FREE_TRACE
738 fprintf(stderr
, "in build_freespace_tree, agno = %d\n", agno
);
740 level
= btree_curs
->num_levels
;
746 * initialize the first block on each btree level
748 for (i
= 0; i
< level
; i
++) {
749 lptr
= &btree_curs
->level
[i
];
751 agbno
= get_next_blockaddr(agno
, i
, btree_curs
);
752 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
753 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
754 XFS_FSB_TO_BB(mp
, 1));
756 if (i
== btree_curs
->num_levels
- 1)
757 btree_curs
->root
= agbno
;
760 lptr
->prev_agbno
= NULLAGBLOCK
;
761 lptr
->prev_buf_p
= NULL
;
763 * initialize block header
765 bt_hdr
= XFS_BUF_TO_ALLOC_BLOCK(lptr
->buf_p
);
766 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
768 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, magic
);
769 INT_SET(bt_hdr
->bb_level
, ARCH_CONVERT
, i
);
770 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
,
771 bt_hdr
->bb_rightsib
= NULLAGBLOCK
);
772 INT_ZERO(bt_hdr
->bb_numrecs
, ARCH_CONVERT
);
775 * run along leaf, setting up records. as we have to switch
776 * blocks, call the prop_freespace_cursor routine to set up the new
777 * pointers for the parent. that can recurse up to the root
778 * if required. set the sibling pointers for leaf level here.
780 if (magic
== XFS_ABTB_MAGIC
)
781 ext_ptr
= findfirst_bno_extent(agno
);
783 ext_ptr
= findfirst_bcnt_extent(agno
);
785 #ifdef XR_BLD_FREE_TRACE
786 fprintf(stderr
, "bft, agno = %d, start = %u, count = %u\n",
787 agno
, ext_ptr
->ex_startblock
, ext_ptr
->ex_blockcount
);
790 lptr
= &btree_curs
->level
[0];
792 for (i
= 0; i
< btree_curs
->level
[0].num_blocks
; i
++) {
794 * block initialization, lay in block header
796 bt_hdr
= XFS_BUF_TO_ALLOC_BLOCK(lptr
->buf_p
);
797 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
799 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, magic
);
800 INT_ZERO(bt_hdr
->bb_level
, ARCH_CONVERT
);
801 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
, lptr
->prev_agbno
);
802 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, NULLAGBLOCK
);
803 INT_SET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
,
804 lptr
->num_recs_pb
+ (lptr
->modulo
> 0));
805 #ifdef XR_BLD_FREE_TRACE
806 fprintf(stderr
, "bft, bb_numrecs = %d\n",
807 INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
));
810 if (lptr
->modulo
> 0)
814 * initialize values in the path up to the root if
815 * this is a multi-level btree
817 if (btree_curs
->num_levels
> 1)
818 prop_freespace_cursor(mp
, agno
, btree_curs
,
819 ext_ptr
->ex_startblock
,
820 ext_ptr
->ex_blockcount
,
823 bt_rec
= (xfs_alloc_rec_t
*) ((char *) bt_hdr
+
824 sizeof(xfs_alloc_block_t
));
825 for (j
= 0; j
< INT_GET(bt_hdr
->bb_numrecs
,ARCH_CONVERT
); j
++) {
826 ASSERT(ext_ptr
!= NULL
);
827 INT_SET(bt_rec
[j
].ar_startblock
, ARCH_CONVERT
,
828 ext_ptr
->ex_startblock
);
829 INT_SET(bt_rec
[j
].ar_blockcount
, ARCH_CONVERT
,
830 ext_ptr
->ex_blockcount
);
831 freeblks
+= ext_ptr
->ex_blockcount
;
832 if (magic
== XFS_ABTB_MAGIC
)
833 ext_ptr
= findnext_bno_extent(ext_ptr
);
835 ext_ptr
= findnext_bcnt_extent(agno
, ext_ptr
);
837 #ifdef XR_BLD_FREE_TRACE
839 fprintf(stderr
, "null extent pointer, j = %d\n",
843 "bft, agno = %d, start = %u, count = %u\n",
844 agno
, ext_ptr
->ex_startblock
,
845 ext_ptr
->ex_blockcount
);
850 if (ext_ptr
!= NULL
) {
852 * get next leaf level block
854 if (lptr
->prev_buf_p
!= NULL
) {
855 #ifdef XR_BLD_FREE_TRACE
856 fprintf(stderr
, " writing fst agbno %u\n",
859 ASSERT(lptr
->prev_agbno
!= NULLAGBLOCK
);
860 libxfs_writebuf(lptr
->prev_buf_p
, 0);
862 lptr
->prev_buf_p
= lptr
->buf_p
;
863 lptr
->prev_agbno
= lptr
->agbno
;
865 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, lptr
->agbno
=
866 get_next_blockaddr(agno
, 0, btree_curs
));
868 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
869 XFS_AGB_TO_DADDR(mp
, agno
, lptr
->agbno
),
870 XFS_FSB_TO_BB(mp
, 1));
878 * no-cursor versions of the XFS equivalents. The address calculators
879 * should be used only for interior btree nodes.
880 * these are adapted from xfs_ialloc_btree.h and xfs_tree.h
882 #define XR_INOBT_KEY_ADDR(mp, bp, i) \
883 (xfs_inobt_key_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
884 + ((i)-1) * sizeof(xfs_inobt_key_t))
886 #define XR_INOBT_PTR_ADDR(mp, bp, i) \
887 (xfs_inobt_ptr_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
888 + (mp)->m_inobt_mxr[1] * sizeof(xfs_inobt_key_t) \
889 + ((i)-1) * sizeof(xfs_inobt_ptr_t))
891 #define XR_INOBT_BLOCK_MAXRECS(mp, level) \
892 XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
893 xfs_inobt, (level) == 0)
896 * we don't have to worry here about how chewing up free extents
897 * may perturb things because inode tree building happens before
898 * freespace tree building.
901 init_ino_cursor(xfs_mount_t
*mp
, xfs_agnumber_t agno
, bt_status_t
*btree_curs
,
902 __uint64_t
*num_inos
, __uint64_t
*num_free_inos
)
906 ino_tree_node_t
*ino_rec
;
909 bt_stat_level_t
*lptr
;
910 bt_stat_level_t
*p_lptr
;
911 xfs_extlen_t blocks_allocated
;
914 *num_inos
= *num_free_inos
= 0;
917 lptr
= &btree_curs
->level
[0];
918 btree_curs
->init
= 1;
920 if ((ino_rec
= findfirst_inode_rec(agno
)) == NULL
) {
922 * easy corner-case -- no inode records
924 lptr
->num_blocks
= 1;
926 lptr
->num_recs_pb
= 0;
927 lptr
->num_recs_tot
= 0;
929 btree_curs
->num_levels
= 1;
930 btree_curs
->num_tot_blocks
= btree_curs
->num_free_blocks
= 1;
932 setup_cursor(mp
, agno
, btree_curs
);
938 * build up statistics
940 for (num_recs
= 0; ino_rec
!= NULL
; ino_rec
= next_ino_rec(ino_rec
)) {
941 ninos
+= XFS_INODES_PER_CHUNK
;
943 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++) {
944 ASSERT(is_inode_confirmed(ino_rec
, i
));
945 if (is_inode_free(ino_rec
, i
))
950 blocks_allocated
= lptr
->num_blocks
= howmany(num_recs
,
951 XR_INOBT_BLOCK_MAXRECS(mp
, 0));
953 lptr
->modulo
= num_recs
% lptr
->num_blocks
;
954 lptr
->num_recs_pb
= num_recs
/ lptr
->num_blocks
;
955 lptr
->num_recs_tot
= num_recs
;
958 if (lptr
->num_blocks
> 1) {
959 for (; btree_curs
->level
[level
-1].num_blocks
> 1
960 && level
< XFS_BTREE_MAXLEVELS
;
962 lptr
= &btree_curs
->level
[level
];
963 p_lptr
= &btree_curs
->level
[level
- 1];
964 lptr
->num_blocks
= howmany(p_lptr
->num_blocks
,
965 XR_INOBT_BLOCK_MAXRECS(mp
, level
));
966 lptr
->modulo
= p_lptr
->num_blocks
% lptr
->num_blocks
;
967 lptr
->num_recs_pb
= p_lptr
->num_blocks
969 lptr
->num_recs_tot
= p_lptr
->num_blocks
;
971 blocks_allocated
+= lptr
->num_blocks
;
974 ASSERT(lptr
->num_blocks
== 1);
975 btree_curs
->num_levels
= level
;
977 btree_curs
->num_tot_blocks
= btree_curs
->num_free_blocks
980 setup_cursor(mp
, agno
, btree_curs
);
983 *num_free_inos
= nfinos
;
989 prop_ino_cursor(xfs_mount_t
*mp
, xfs_agnumber_t agno
, bt_status_t
*btree_curs
,
990 xfs_agino_t startino
, int level
)
992 xfs_inobt_block_t
*bt_hdr
;
993 xfs_inobt_key_t
*bt_key
;
994 xfs_inobt_ptr_t
*bt_ptr
;
996 bt_stat_level_t
*lptr
;
1000 if (level
>= btree_curs
->num_levels
)
1003 lptr
= &btree_curs
->level
[level
];
1004 bt_hdr
= XFS_BUF_TO_INOBT_BLOCK(lptr
->buf_p
);
1006 if (INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
) == 0) {
1008 * this only happens once to initialize the
1009 * first path up the left side of the tree
1010 * where the agbno's are already set up
1012 prop_ino_cursor(mp
, agno
, btree_curs
, startino
, level
);
1015 if (INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
) ==
1016 lptr
->num_recs_pb
+ (lptr
->modulo
> 0)) {
1018 * write out current prev block, grab us a new block,
1019 * and set the rightsib pointer of current block
1021 #ifdef XR_BLD_INO_TRACE
1022 fprintf(stderr
, " ino prop agbno %d ", lptr
->prev_agbno
);
1024 if (lptr
->prev_agbno
!= NULLAGBLOCK
) {
1025 ASSERT(lptr
->prev_buf_p
!= NULL
);
1026 libxfs_writebuf(lptr
->prev_buf_p
, 0);
1028 lptr
->prev_agbno
= lptr
->agbno
;;
1029 lptr
->prev_buf_p
= lptr
->buf_p
;
1030 agbno
= get_next_blockaddr(agno
, level
, btree_curs
);
1032 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, agbno
);
1034 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
1035 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
1036 XFS_FSB_TO_BB(mp
, 1));
1037 lptr
->agbno
= agbno
;
1043 * initialize block header
1045 bt_hdr
= XFS_BUF_TO_INOBT_BLOCK(lptr
->buf_p
);
1046 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
1048 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, XFS_IBT_MAGIC
);
1049 INT_SET(bt_hdr
->bb_level
, ARCH_CONVERT
, level
);
1050 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
, lptr
->prev_agbno
);
1051 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, NULLAGBLOCK
);
1052 INT_ZERO(bt_hdr
->bb_numrecs
, ARCH_CONVERT
);
1054 * propagate extent record for first extent in new block up
1056 prop_ino_cursor(mp
, agno
, btree_curs
, startino
, level
);
1059 * add inode info to current block
1061 INT_MOD(bt_hdr
->bb_numrecs
, ARCH_CONVERT
, +1);
1063 bt_key
= XR_INOBT_KEY_ADDR(mp
, bt_hdr
,
1064 INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
));
1065 bt_ptr
= XR_INOBT_PTR_ADDR(mp
, bt_hdr
,
1066 INT_GET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
));
1068 INT_SET(bt_key
->ir_startino
, ARCH_CONVERT
, startino
);
1069 INT_SET(*bt_ptr
, ARCH_CONVERT
, btree_curs
->level
[level
-1].agbno
);
1073 build_agi(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
1074 bt_status_t
*btree_curs
, xfs_agino_t first_agino
,
1075 xfs_agino_t count
, xfs_agino_t freecount
)
1081 agi_buf
= libxfs_getbuf(mp
->m_dev
,
1082 XFS_AG_DADDR(mp
, agno
, XFS_AGI_DADDR
),
1083 mp
->m_sb
.sb_sectsize
/BBSIZE
);
1084 agi
= XFS_BUF_TO_AGI(agi_buf
);
1085 bzero(agi
, mp
->m_sb
.sb_sectsize
);
1087 INT_SET(agi
->agi_magicnum
, ARCH_CONVERT
, XFS_AGI_MAGIC
);
1088 INT_SET(agi
->agi_versionnum
, ARCH_CONVERT
, XFS_AGI_VERSION
);
1089 INT_SET(agi
->agi_seqno
, ARCH_CONVERT
, agno
);
1090 if (agno
< mp
->m_sb
.sb_agcount
- 1)
1091 INT_SET(agi
->agi_length
, ARCH_CONVERT
, mp
->m_sb
.sb_agblocks
);
1093 INT_SET(agi
->agi_length
, ARCH_CONVERT
, mp
->m_sb
.sb_dblocks
-
1094 (xfs_drfsbno_t
) mp
->m_sb
.sb_agblocks
* agno
);
1095 INT_SET(agi
->agi_count
, ARCH_CONVERT
, count
);
1096 INT_SET(agi
->agi_root
, ARCH_CONVERT
, btree_curs
->root
);
1097 INT_SET(agi
->agi_level
, ARCH_CONVERT
, btree_curs
->num_levels
);
1098 INT_SET(agi
->agi_freecount
, ARCH_CONVERT
, freecount
);
1099 INT_SET(agi
->agi_newino
, ARCH_CONVERT
, first_agino
);
1100 INT_SET(agi
->agi_dirino
, ARCH_CONVERT
, NULLAGINO
);
1102 for (i
= 0; i
< XFS_AGI_UNLINKED_BUCKETS
; i
++) {
1103 INT_SET(agi
->agi_unlinked
[i
], ARCH_CONVERT
, NULLAGINO
);
1106 libxfs_writebuf(agi_buf
, 0);
1110 * rebuilds an inode tree given a cursor. We're lazy here and call
1111 * the routine that builds the agi
1114 build_ino_tree(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
1115 bt_status_t
*btree_curs
)
1119 xfs_agblock_t agbno
;
1120 xfs_agino_t first_agino
;
1121 xfs_inobt_block_t
*bt_hdr
;
1122 xfs_inobt_rec_t
*bt_rec
;
1123 ino_tree_node_t
*ino_rec
;
1124 bt_stat_level_t
*lptr
;
1125 xfs_agino_t count
= 0;
1126 xfs_agino_t freecount
= 0;
1129 int level
= btree_curs
->num_levels
;
1131 for (i
= 0; i
< level
; i
++) {
1132 lptr
= &btree_curs
->level
[i
];
1134 agbno
= get_next_blockaddr(agno
, i
, btree_curs
);
1135 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
1136 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
1137 XFS_FSB_TO_BB(mp
, 1));
1139 if (i
== btree_curs
->num_levels
- 1)
1140 btree_curs
->root
= agbno
;
1142 lptr
->agbno
= agbno
;
1143 lptr
->prev_agbno
= NULLAGBLOCK
;
1144 lptr
->prev_buf_p
= NULL
;
1146 * initialize block header
1148 bt_hdr
= XFS_BUF_TO_INOBT_BLOCK(lptr
->buf_p
);
1149 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
1151 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, XFS_IBT_MAGIC
);
1152 INT_SET(bt_hdr
->bb_level
, ARCH_CONVERT
, i
);
1153 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
,
1154 bt_hdr
->bb_rightsib
= NULLAGBLOCK
);
1155 INT_ZERO(bt_hdr
->bb_numrecs
, ARCH_CONVERT
);
1158 * run along leaf, setting up records. as we have to switch
1159 * blocks, call the prop_ino_cursor routine to set up the new
1160 * pointers for the parent. that can recurse up to the root
1161 * if required. set the sibling pointers for leaf level here.
1163 ino_rec
= findfirst_inode_rec(agno
);
1165 if (ino_rec
!= NULL
)
1166 first_agino
= ino_rec
->ino_startnum
;
1168 first_agino
= NULLAGINO
;
1170 lptr
= &btree_curs
->level
[0];
1172 for (i
= 0; i
< lptr
->num_blocks
; i
++) {
1174 * block initialization, lay in block header
1176 bt_hdr
= XFS_BUF_TO_INOBT_BLOCK(lptr
->buf_p
);
1177 bzero(bt_hdr
, mp
->m_sb
.sb_blocksize
);
1179 INT_SET(bt_hdr
->bb_magic
, ARCH_CONVERT
, XFS_IBT_MAGIC
);
1180 INT_ZERO(bt_hdr
->bb_level
, ARCH_CONVERT
);
1181 INT_SET(bt_hdr
->bb_leftsib
, ARCH_CONVERT
, lptr
->prev_agbno
);
1182 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, NULLAGBLOCK
);
1183 INT_SET(bt_hdr
->bb_numrecs
, ARCH_CONVERT
,
1184 lptr
->num_recs_pb
+ (lptr
->modulo
> 0));
1186 if (lptr
->modulo
> 0)
1189 if (lptr
->num_recs_pb
> 0)
1190 prop_ino_cursor(mp
, agno
, btree_curs
,
1191 ino_rec
->ino_startnum
, 0);
1193 bt_rec
= (xfs_inobt_rec_t
*) ((char *) bt_hdr
+
1194 sizeof(xfs_inobt_block_t
));
1195 for (j
= 0; j
< INT_GET(bt_hdr
->bb_numrecs
,ARCH_CONVERT
); j
++) {
1196 ASSERT(ino_rec
!= NULL
);
1197 INT_SET(bt_rec
[j
].ir_startino
, ARCH_CONVERT
,
1198 ino_rec
->ino_startnum
);
1199 INT_SET(bt_rec
[j
].ir_free
, ARCH_CONVERT
,
1203 for (k
= 0; k
< sizeof(xfs_inofree_t
)*NBBY
; k
++) {
1204 ASSERT(is_inode_confirmed(ino_rec
, k
));
1205 inocnt
+= is_inode_free(ino_rec
, k
);
1208 INT_SET(bt_rec
[j
].ir_freecount
, ARCH_CONVERT
, inocnt
);
1209 freecount
+= inocnt
;
1210 count
+= XFS_INODES_PER_CHUNK
;
1211 ino_rec
= next_ino_rec(ino_rec
);
1214 if (ino_rec
!= NULL
) {
1216 * get next leaf level block
1218 if (lptr
->prev_buf_p
!= NULL
) {
1219 #ifdef XR_BLD_INO_TRACE
1220 fprintf(stderr
, "writing inobt agbno %u\n",
1223 ASSERT(lptr
->prev_agbno
!= NULLAGBLOCK
);
1224 libxfs_writebuf(lptr
->prev_buf_p
, 0);
1226 lptr
->prev_buf_p
= lptr
->buf_p
;
1227 lptr
->prev_agbno
= lptr
->agbno
;
1229 INT_SET(bt_hdr
->bb_rightsib
, ARCH_CONVERT
, lptr
->agbno
=
1230 get_next_blockaddr(agno
, 0, btree_curs
));
1232 lptr
->buf_p
= libxfs_getbuf(mp
->m_dev
,
1233 XFS_AGB_TO_DADDR(mp
, agno
, lptr
->agbno
),
1234 XFS_FSB_TO_BB(mp
, 1));
1238 build_agi(mp
, agno
, btree_curs
, first_agino
, count
, freecount
);
/*
 * NOTE(review): lossy extraction -- braces, blank lines and some
 * statements (the i++/num_free_blocks-- updates inside the AGFL
 * fill loops, the j++ counters in the discard loops, the "else"
 * keywords, and i/j initializations) were dropped, and original
 * line numbers are fused into the text.  Only comments are added
 * here; code tokens are byte-identical.  Recover missing lines
 * from upstream before compiling.
 *
 * Writes the on-disk AGF header and the AGFL for allocation group
 * 'agno'.  'bno_bt'/'bcnt_bt' are the just-built freespace btree
 * cursors (roots, levels, and any unused pre-allocated blocks);
 * 'freeblks' is the free-block count to record; 'lostblocks' is
 * the number of blocks we expected to be unable to place.
 */
1242 * build both the agf and the agfl for an agno given both
1246 build_agf_agfl(xfs_mount_t
*mp
,
1247 xfs_agnumber_t agno
,
1248 bt_status_t
*bno_bt
,
1249 bt_status_t
*bcnt_bt
,
1250 xfs_extlen_t freeblks
, /* # free blocks in tree */
1251 int lostblocks
) /* # blocks that will be lost */
1253 extent_tree_node_t
*ext_ptr
;
1254 xfs_buf_t
*agf_buf
, *agfl_buf
;
/* read in and zero the AGF sector */
1260 agf_buf
= libxfs_getbuf(mp
->m_dev
,
1261 XFS_AG_DADDR(mp
, agno
, XFS_AGF_DADDR
),
1262 mp
->m_sb
.sb_sectsize
/BBSIZE
);
1263 agf
= XFS_BUF_TO_AGF(agf_buf
);
1264 bzero(agf
, mp
->m_sb
.sb_sectsize
);
1266 #ifdef XR_BLD_FREE_TRACE
1267 fprintf(stderr
, "agf = 0x%x, agf_buf->b_un.b_addr = 0x%x\n",
1268 (__psint_t
) agf
, (__psint_t
) agf_buf
->b_un
.b_addr
);
1272 * set up fixed part of agf
1274 INT_SET(agf
->agf_magicnum
, ARCH_CONVERT
, XFS_AGF_MAGIC
);
1275 INT_SET(agf
->agf_versionnum
, ARCH_CONVERT
, XFS_AGF_VERSION
);
1276 INT_SET(agf
->agf_seqno
, ARCH_CONVERT
, agno
);
/*
 * All AGs except the last are sb_agblocks long; the last AG gets
 * whatever remains of sb_dblocks.  (The "else" before line 1281
 * was lost in extraction.)
 */
1278 if (agno
< mp
->m_sb
.sb_agcount
- 1)
1279 INT_SET(agf
->agf_length
, ARCH_CONVERT
, mp
->m_sb
.sb_agblocks
);
1281 INT_SET(agf
->agf_length
, ARCH_CONVERT
, mp
->m_sb
.sb_dblocks
-
1282 (xfs_drfsbno_t
) mp
->m_sb
.sb_agblocks
* agno
);
/* record root block and height of both freespace btrees */
1284 INT_SET(agf
->agf_roots
[XFS_BTNUM_BNO
], ARCH_CONVERT
, bno_bt
->root
);
1285 INT_SET(agf
->agf_levels
[XFS_BTNUM_BNO
], ARCH_CONVERT
,
1286 bno_bt
->num_levels
);
1287 INT_SET(agf
->agf_roots
[XFS_BTNUM_CNT
], ARCH_CONVERT
, bcnt_bt
->root
);
1288 INT_SET(agf
->agf_levels
[XFS_BTNUM_CNT
], ARCH_CONVERT
,
1289 bcnt_bt
->num_levels
);
1290 INT_SET(agf
->agf_freeblks
, ARCH_CONVERT
, freeblks
);
1292 #ifdef XR_BLD_FREE_TRACE
1293 fprintf(stderr
, "bno root = %u, bcnt root = %u, indices = %u %u\n",
1294 INT_GET(agf
->agf_roots
[XFS_BTNUM_BNO
], ARCH_CONVERT
),
1295 INT_GET(agf
->agf_roots
[XFS_BTNUM_CNT
], ARCH_CONVERT
),
1301 * do we have left-over blocks in the btree cursors that should
1302 * be used to fill the AGFL?
1304 if (bno_bt
->num_free_blocks
> 0 || bcnt_bt
->num_free_blocks
> 0) {
1306 * yes - grab the AGFL buffer
1308 agfl_buf
= libxfs_getbuf(mp
->m_dev
,
1309 XFS_AG_DADDR(mp
, agno
, XFS_AGFL_DADDR
),
1310 mp
->m_sb
.sb_sectsize
/BBSIZE
);
1311 agfl
= XFS_BUF_TO_AGFL(agfl_buf
);
1312 bzero(agfl
, mp
->m_sb
.sb_sectsize
);
1314 * ok, now grab as many blocks as we can
/*
 * Drain leftover blocks from the bno cursor first, then the bcnt
 * cursor, into AGFL slots (loop increments lost in extraction).
 */
1317 while (bno_bt
->num_free_blocks
> 0 && i
< XFS_AGFL_SIZE
) {
1318 INT_SET(agfl
->agfl_bno
[i
], ARCH_CONVERT
,
1319 get_next_blockaddr(agno
, 0, bno_bt
));
1323 while (bcnt_bt
->num_free_blocks
> 0 && i
< XFS_AGFL_SIZE
) {
1324 INT_SET(agfl
->agfl_bno
[i
], ARCH_CONVERT
,
1325 get_next_blockaddr(agno
, 0, bcnt_bt
));
1329 * now throw the rest of the blocks away and complain
1331 while (bno_bt
->num_free_blocks
> 0) {
1332 (void) get_next_blockaddr(agno
, 0, bno_bt
);
1335 while (bcnt_bt
->num_free_blocks
> 0) {
1336 (void) get_next_blockaddr(agno
, 0, bcnt_bt
);
/*
 * j counts blocks actually discarded; warn differently when it
 * does not match the caller's prediction ("else" lost).
 */
1341 if (j
== lostblocks
)
1342 do_warn("lost %d blocks in ag %u\n", j
, agno
);
1344 do_warn("thought we were going to lose %d "
1345 "blocks in ag %u, actually lost %d\n",
1346 lostblocks
, j
, agno
);
/* AGFL occupied: slots [0, i-1], count i */
1349 INT_ZERO(agf
->agf_flfirst
, ARCH_CONVERT
);
1350 INT_SET(agf
->agf_fllast
, ARCH_CONVERT
, i
- 1);
1351 INT_SET(agf
->agf_flcount
, ARCH_CONVERT
, i
);
1353 #ifdef XR_BLD_FREE_TRACE
1354 fprintf(stderr
, "writing agfl for ag %u\n", agno
);
1357 libxfs_writebuf(agfl_buf
, 0);
/* no leftover blocks: mark the AGFL empty ("else" branch lost) */
1359 INT_ZERO(agf
->agf_flfirst
, ARCH_CONVERT
);
1360 INT_SET(agf
->agf_fllast
, ARCH_CONVERT
, XFS_AGFL_SIZE
- 1);
1361 INT_ZERO(agf
->agf_flcount
, ARCH_CONVERT
);
/* longest free extent comes from the incore by-count tree */
1364 ext_ptr
= findbiggest_bcnt_extent(agno
);
1365 INT_SET(agf
->agf_longest
, ARCH_CONVERT
,
1366 (ext_ptr
!= NULL
) ? ext_ptr
->ex_blockcount
: 0);
/* the two freespace trees must have distinct root blocks */
1368 ASSERT(INT_GET(agf
->agf_roots
[XFS_BTNUM_BNOi
], ARCH_CONVERT
) !=
1369 INT_GET(agf
->agf_roots
[XFS_BTNUM_CNTi
], ARCH_CONVERT
));
1371 libxfs_writebuf(agf_buf
, 0);
1373 #ifdef XR_BLD_FREE_TRACE
1374 fprintf(stderr
, "wrote agf for ag %u, error = %d\n", agno
, error
);
/*
 * NOTE(review): lossy extraction -- declarations of bp/sbp, the
 * "if (!bp)" guard before do_error, and the final argument line
 * of the libxfs_xlate_sb() call were dropped; only comments are
 * added here.  Recover missing lines from upstream before
 * compiling.
 *
 * Copies the repair-accumulated counters (sb_icount, sb_ifree,
 * sb_fdblocks, sb_frextents -- file-scope totals gathered during
 * phase 5) into the incore superblock, syncs version/feature
 * bits, then translates the incore sb to on-disk format and
 * writes it back.
 */
1379 * update the superblock counters, sync the sb version numbers and
1380 * feature bits to the filesystem, and sync up the on-disk superblock
1381 * to match the incore superblock.
1384 sync_sb(xfs_mount_t
*mp
)
1389 bp
= libxfs_getsb(mp
, 0);
1391 do_error("couldn't get superblock\n");
1393 sbp
= XFS_BUF_TO_SBP(bp
);
/* install the counters computed during the AG rebuilds */
1395 mp
->m_sb
.sb_icount
= sb_icount
;
1396 mp
->m_sb
.sb_ifree
= sb_ifree
;
1397 mp
->m_sb
.sb_fdblocks
= sb_fdblocks
;
1398 mp
->m_sb
.sb_frextents
= sb_frextents
;
1400 update_sb_version(mp
);
/* translate incore -> on-disk layout, then flush the buffer */
1403 libxfs_xlate_sb(XFS_BUF_PTR(bp
), sbp
, -1, ARCH_CONVERT
,
1405 libxfs_writebuf(bp
, 0);
/*
 * NOTE(review): lossy extraction -- the declaration of loop
 * variable i and the function's braces were dropped; only
 * comments are added here.
 *
 * Forces the filesystem-internal inodes to appear allocated.
 * Looks up the incore inode record containing sb_rootino and
 * marks that inode plus the next two (per the header comment:
 * the root and realtime inodes, which phase 6 reinitializes)
 * as in use.
 */
1409 * make sure the root and realtime inodes show up allocated
1410 * even if they've been freed. they get reinitialized in phase6.
1413 keep_fsinos(xfs_mount_t
*mp
)
1415 ino_tree_node_t
*irec
;
/* record covering the root inode's chunk in its AG */
1418 irec
= find_inode_rec(XFS_INO_TO_AGNO(mp
, mp
->m_sb
.sb_rootino
),
1419 XFS_INO_TO_AGINO(mp
, mp
->m_sb
.sb_rootino
));
/* mark sb_rootino and the two inodes that follow it as used */
1421 for (i
= 0; i
< 3; i
++)
1422 set_inode_used(irec
, i
);
1426 phase5(xfs_mount_t
*mp
)
1428 __uint64_t num_inos
;
1429 __uint64_t num_free_inos
;
1430 bt_status_t bno_btree_curs
;
1431 bt_status_t bcnt_btree_curs
;
1432 bt_status_t ino_btree_curs
;
1433 xfs_agnumber_t agno
;
1434 int extra_blocks
= 0;
1435 uint num_freeblocks
;
1436 xfs_extlen_t freeblks1
;
1437 xfs_extlen_t freeblks2
;
1438 xfs_agblock_t num_extents
;
1439 extern int count_bno_extents(xfs_agnumber_t
);
1440 extern int count_bno_extents_blocks(xfs_agnumber_t
, uint
*);
1441 #ifdef XR_BLD_FREE_TRACE
1442 extern int count_bcnt_extents(xfs_agnumber_t
);
1445 do_log("Phase 5 - rebuild AG headers and trees...\n");
1447 #ifdef XR_BLD_FREE_TRACE
1448 fprintf(stderr
, "inobt level 1, maxrec = %d, minrec = %d\n",
1449 XFS_BTREE_BLOCK_MAXRECS(mp
->m_sb
.sb_blocksize
, xfs_inobt
, 0),
1450 XFS_BTREE_BLOCK_MINRECS(mp
->m_sb
.sb_blocksize
, xfs_inobt
, 0)
1452 fprintf(stderr
, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1453 XFS_BTREE_BLOCK_MAXRECS(mp
->m_sb
.sb_blocksize
, xfs_inobt
, 1),
1454 XFS_BTREE_BLOCK_MINRECS(mp
->m_sb
.sb_blocksize
, xfs_inobt
, 1)
1456 fprintf(stderr
, "xr inobt level 0 (leaf), maxrec = %d\n",
1457 XR_INOBT_BLOCK_MAXRECS(mp
, 0));
1458 fprintf(stderr
, "xr inobt level 1 (int), maxrec = %d\n",
1459 XR_INOBT_BLOCK_MAXRECS(mp
, 1));
1460 fprintf(stderr
, "bnobt level 1, maxrec = %d, minrec = %d\n",
1461 XFS_BTREE_BLOCK_MAXRECS(mp
->m_sb
.sb_blocksize
, xfs_alloc
, 0),
1462 XFS_BTREE_BLOCK_MINRECS(mp
->m_sb
.sb_blocksize
, xfs_alloc
, 0));
1463 fprintf(stderr
, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
1464 XFS_BTREE_BLOCK_MAXRECS(mp
->m_sb
.sb_blocksize
, xfs_alloc
, 1),
1465 XFS_BTREE_BLOCK_MINRECS(mp
->m_sb
.sb_blocksize
, xfs_alloc
, 1));
1469 * make sure the root and realtime inodes show up allocated
1473 for (agno
= 0; agno
< mp
->m_sb
.sb_agcount
; agno
++) {
1475 * build up incore bno and bcnt extent btrees
1477 num_extents
= mk_incore_fstree(mp
, agno
);
1479 #ifdef XR_BLD_FREE_TRACE
1480 fprintf(stderr
, "# of bno extents is %d\n",
1481 count_bno_extents(agno
));
1484 if (num_extents
== 0) {
1486 * XXX - what we probably should do here is pick an
1487 * inode for a regular file in the allocation group
1488 * that has space allocated and shoot it by traversing
1489 * the bmap list and putting all its extents on the
1490 * incore freespace trees, clearing the inode,
1491 * and clearing the in-use bit in the incore inode
1492 * tree. Then try mk_incore_fstree() again.
1494 do_error("unable to rebuild AG %u. "
1495 "Not enough free space in on-disk AG.\n", agno
);
1499 * done with the AG bitmap, toss it...
1501 teardown_ag_bmap(mp
, agno
);
1504 * ok, now set up the btree cursors for the
1505 * on-disk btrees (includs pre-allocating all
1506 * required blocks for the trees themselves)
1508 init_ino_cursor(mp
, agno
, &ino_btree_curs
,
1509 &num_inos
, &num_free_inos
);
1511 sb_icount
+= num_inos
;
1512 sb_ifree
+= num_free_inos
;
1514 num_extents
= count_bno_extents_blocks(agno
, &num_freeblocks
);
1516 * lose two blocks per AG -- the space tree roots
1517 * are counted as allocated since the space trees
1520 sb_fdblocks
+= num_freeblocks
- 2;
1522 if (num_extents
== 0) {
1524 * XXX - what we probably should do here is pick an
1525 * inode for a regular file in the allocation group
1526 * that has space allocated and shoot it by traversing
1527 * the bmap list and putting all its extents on the
1528 * incore freespace trees, clearing the inode,
1529 * and clearing the in-use bit in the incore inode
1530 * tree. Then try mk_incore_fstree() again.
1532 do_error("unable to rebuild AG %u. No free space.\n",
1537 #ifdef XR_BLD_FREE_TRACE
1538 fprintf(stderr
, "# of bno extents is %d\n", num_extents
);
1542 * track blocks that we might really lose
1544 extra_blocks
= calculate_freespace_cursor(mp
, agno
,
1545 &num_extents
, &bno_btree_curs
);
1548 * freespace btrees live in the "free space" but
1549 * the filesystem treats AGFL blocks as allocated
1550 * since they aren't described by the freespace trees
1554 * see if we can fit all the extra blocks into the AGFL
1556 extra_blocks
= (extra_blocks
- XFS_AGFL_SIZE
> 0)
1557 ? extra_blocks
- XFS_AGFL_SIZE
1560 if (extra_blocks
> 0) {
1561 do_warn("lost %d blocks in agno %d, sorry.\n",
1562 extra_blocks
, agno
);
1563 sb_fdblocks
-= extra_blocks
;
1566 bcnt_btree_curs
= bno_btree_curs
;
1568 setup_cursor(mp
, agno
, &bno_btree_curs
);
1569 setup_cursor(mp
, agno
, &bcnt_btree_curs
);
1571 #ifdef XR_BLD_FREE_TRACE
1572 fprintf(stderr
, "# of bno extents is %d\n",
1573 count_bno_extents(agno
));
1574 fprintf(stderr
, "# of bcnt extents is %d\n",
1575 count_bcnt_extents(agno
));
1578 * now rebuild the freespace trees
1580 freeblks1
= build_freespace_tree(mp
, agno
, &bno_btree_curs
,
1582 #ifdef XR_BLD_FREE_TRACE
1583 fprintf(stderr
, "# of free blocks == %d\n", freeblks1
);
1585 write_cursor(&bno_btree_curs
);
1587 freeblks2
= build_freespace_tree(mp
, agno
, &bcnt_btree_curs
,
1589 write_cursor(&bcnt_btree_curs
);
1591 ASSERT(freeblks1
== freeblks2
);
1594 * set up agf and agfl
1596 build_agf_agfl(mp
, agno
, &bno_btree_curs
,
1597 &bcnt_btree_curs
, freeblks1
, extra_blocks
);
1599 * build inode allocation tree. this also build the agi
1601 build_ino_tree(mp
, agno
, &ino_btree_curs
);
1602 write_cursor(&ino_btree_curs
);
1606 finish_cursor(&bno_btree_curs
);
1607 finish_cursor(&ino_btree_curs
);
1608 finish_cursor(&bcnt_btree_curs
);
1610 * release the incore per-AG bno/bcnt trees so
1611 * the extent nodes can be recycled
1613 release_agbno_extent_tree(agno
);
1614 release_agbcnt_extent_tree(agno
);
1617 if (mp
->m_sb
.sb_rblocks
) {
1619 " - generate realtime summary info and bitmap...\n");
1621 generate_rtinfo(mp
, btmcompute
, sumcompute
);
1622 teardown_rt_bmap(mp
);
1625 do_log(" - reset superblock...\n");
1628 * sync superblock counter and set version bits correctly