2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "err_protos.h"
31 * note: there are 4 sets of incore things handled here:
32 * block bitmaps, extent trees, uncertain inode list,
33 * and inode tree. The tree-based code uses the AVL
34 * tree package used by the IRIX kernel VM code
35 * (sys/avl.h). The inode list code uses the same records
36 * as the inode tree code for convenience. The bitmaps
37 * and bitmap operators are mostly macros defined in incore.h.
38 * There are one of everything per AG except for extent
39 * trees. There's one duplicate extent tree, one bno and
40 * one bcnt extent tree per AG. Not all of the above exist
41 * through all phases. The duplicate extent tree gets trashed
42 * at the end of phase 4. The bno/bcnt trees don't appear until
43 * phase 5. The uncertain inode list goes away at the end of
44 * phase 3. The inode tree and bno/bcnt trees go away after phase 5.
47 static avl64tree_desc_t
*rt_ext_tree_ptr
; /* dup extent tree for rt */
48 static pthread_mutex_t rt_ext_tree_lock
;
50 static struct btree_root
**dup_extent_trees
; /* per ag dup extent trees */
51 static pthread_mutex_t
*dup_extent_tree_locks
;
53 static avltree_desc_t
**extent_bno_ptrs
; /*
54 * array of extent tree ptrs
55 * one per ag for free extents
56 * sorted by starting block
59 static avltree_desc_t
**extent_bcnt_ptrs
; /*
60 * array of extent tree ptrs
61 * one per ag for free extents
66 * duplicate extent tree functions
70 release_dup_extent_tree(
73 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
74 btree_clear(dup_extent_trees
[agno
]);
75 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
81 xfs_agblock_t startblock
,
82 xfs_extlen_t blockcount
)
86 fprintf(stderr
, "Adding dup extent - %d/%d %d\n", agno
, startblock
,
89 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
90 ret
= btree_insert(dup_extent_trees
[agno
], startblock
,
91 (void *)(uintptr_t)(startblock
+ blockcount
));
92 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
99 xfs_agblock_t start_agbno
,
100 xfs_agblock_t end_agbno
)
105 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
106 if (!btree_find(dup_extent_trees
[agno
], start_agbno
, &bno
)) {
108 goto out
; /* this really shouldn't happen */
110 if (bno
< end_agbno
) {
114 ret
= (uintptr_t)btree_peek_prev(dup_extent_trees
[agno
], NULL
) >
117 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
123 * extent tree stuff is avl trees of duplicate extents,
124 * sorted in order by block number. there is one tree per ag.
127 static extent_tree_node_t
*
128 mk_extent_tree_nodes(xfs_agblock_t new_startblock
,
129 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
131 extent_tree_node_t
*new;
133 new = malloc(sizeof(*new));
135 do_error(_("couldn't allocate new extent descriptor.\n"));
137 new->avl_node
.avl_nextino
= NULL
;
138 new->ex_startblock
= new_startblock
;
139 new->ex_blockcount
= new_blockcount
;
140 new->ex_state
= new_state
;
148 release_extent_tree_node(extent_tree_node_t
*node
)
154 * routines to recycle all nodes in a tree. it walks the tree
155 * and puts all nodes back on the free list so the nodes can be
156 * reused. the duplicate and bno/bcnt extent trees for each AG
157 * are recycled after they're no longer needed to save memory
160 release_extent_tree(avltree_desc_t
*tree
)
162 extent_tree_node_t
*ext
;
163 extent_tree_node_t
*tmp
;
164 extent_tree_node_t
*lext
;
165 extent_tree_node_t
*ltmp
;
167 if (tree
->avl_firstino
== NULL
)
170 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
172 while (ext
!= NULL
) {
173 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
176 * ext->next is guaranteed to be set only in bcnt trees
178 if (ext
->next
!= NULL
) {
180 while (lext
!= NULL
) {
182 release_extent_tree_node(lext
);
187 release_extent_tree_node(ext
);
191 tree
->avl_root
= tree
->avl_firstino
= NULL
;
197 * top-level (visible) routines
200 release_agbno_extent_tree(xfs_agnumber_t agno
)
202 release_extent_tree(extent_bno_ptrs
[agno
]);
208 release_agbcnt_extent_tree(xfs_agnumber_t agno
)
210 release_extent_tree(extent_bcnt_ptrs
[agno
]);
216 * the next 4 routines manage the trees of free extents -- 2 trees
217 * per AG. The first tree is sorted by block number. The second
218 * tree is sorted by extent size. This is the bno tree.
221 add_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
222 xfs_extlen_t blockcount
)
224 extent_tree_node_t
*ext
;
226 ASSERT(extent_bno_ptrs
!= NULL
);
227 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
229 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
231 if (avl_insert(extent_bno_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
232 do_error(_("duplicate bno extent range\n"));
237 findfirst_bno_extent(xfs_agnumber_t agno
)
239 ASSERT(extent_bno_ptrs
!= NULL
);
240 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
242 return((extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
);
246 find_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
)
248 ASSERT(extent_bno_ptrs
!= NULL
);
249 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
251 return((extent_tree_node_t
*) avl_find(extent_bno_ptrs
[agno
],
256 * delete a node that's in the tree (pointer obtained by a find routine)
259 get_bno_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
261 ASSERT(extent_bno_ptrs
!= NULL
);
262 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
264 avl_delete(extent_bno_ptrs
[agno
], &ext
->avl_node
);
/*
 * normalizing constant for bcnt size -> address conversion (see avl ops)
 * used by the AVL tree code to convert sizes and must be used when
 * doing an AVL search in the tree (e.g. avl_findrange(s)).
 * Bigger size maps to smaller key, so the largest extent sorts first.
 */
#define MAXBCNT		0xFFFFFFFF
#define BCNT_ADDR(cnt)	((unsigned int) MAXBCNT - (cnt))
278 * the next 4 routines manage the trees of free extents -- 2 trees
279 * per AG. The first tree is sorted by block number. The second
280 * tree is sorted by extent size. This is the bcnt tree.
283 add_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
284 xfs_extlen_t blockcount
)
286 extent_tree_node_t
*ext
, *prev
, *current
, *top
;
287 xfs_agblock_t tmp_startblock
;
288 xfs_extlen_t tmp_blockcount
;
289 extent_state_t tmp_state
;
291 ASSERT(extent_bcnt_ptrs
!= NULL
);
292 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
294 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
296 ASSERT(ext
->next
== NULL
);
299 fprintf(stderr
, "adding bcnt: agno = %d, start = %u, count = %u\n",
300 agno
, startblock
, blockcount
);
302 if ((current
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
303 blockcount
)) != NULL
) {
305 * avl tree code doesn't handle dups so insert
306 * onto linked list in increasing startblock order
308 * when called from mk_incore_fstree,
309 * startblock is in increasing order.
310 * current is an "anchor" node.
311 * quick check if the new ext goes to the end.
312 * if so, append at the end, using the last field
315 ASSERT(current
->last
!= NULL
);
316 if (startblock
> current
->last
->ex_startblock
) {
317 current
->last
->next
= ext
;
323 * scan, to find the proper location for new entry.
324 * this scan is *very* expensive and gets worse with
325 * with increasing entries.
327 top
= prev
= current
;
328 while (current
!= NULL
&&
329 startblock
> current
->ex_startblock
) {
331 current
= current
->next
;
334 if (top
== current
) {
337 * new entry should be ahead of current.
338 * to keep the avl tree intact,
339 * swap the values of to-be-inserted element
340 * and the values of the head of the list.
341 * then insert as the 2nd element on the list.
343 * see the comment in get_bcnt_extent()
344 * as to why we have to do this.
346 tmp_startblock
= top
->ex_startblock
;
347 tmp_blockcount
= top
->ex_blockcount
;
348 tmp_state
= top
->ex_state
;
350 top
->ex_startblock
= ext
->ex_startblock
;
351 top
->ex_blockcount
= ext
->ex_blockcount
;
352 top
->ex_state
= ext
->ex_state
;
354 ext
->ex_startblock
= tmp_startblock
;
355 ext
->ex_blockcount
= tmp_blockcount
;
356 ext
->ex_state
= tmp_state
;
368 if (avl_insert(extent_bcnt_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
369 do_error(_(": duplicate bno extent range\n"));
372 ext
->last
= ext
; /* ext is an "anchor" node */
378 findfirst_bcnt_extent(xfs_agnumber_t agno
)
380 ASSERT(extent_bcnt_ptrs
!= NULL
);
381 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
383 return((extent_tree_node_t
*) extent_bcnt_ptrs
[agno
]->avl_firstino
);
387 findbiggest_bcnt_extent(xfs_agnumber_t agno
)
389 ASSERT(extent_bcnt_ptrs
!= NULL
);
390 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
392 return((extent_tree_node_t
*) avl_lastino(extent_bcnt_ptrs
[agno
]->avl_root
));
396 findnext_bcnt_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
400 if (ext
->next
!= NULL
) {
401 ASSERT(ext
->ex_blockcount
== ext
->next
->ex_blockcount
);
402 ASSERT(ext
->ex_startblock
< ext
->next
->ex_startblock
);
406 * have to look at the top of the list to get the
407 * correct avl_nextino pointer since that pointer
408 * is maintained and altered by the AVL code.
410 nextino
= avl_find(extent_bcnt_ptrs
[agno
], ext
->ex_blockcount
);
411 ASSERT(nextino
!= NULL
);
412 if (nextino
->avl_nextino
!= NULL
) {
413 ASSERT(ext
->ex_blockcount
< ((extent_tree_node_t
*)
414 nextino
->avl_nextino
)->ex_blockcount
);
416 return((extent_tree_node_t
*) nextino
->avl_nextino
);
421 * this is meant to be called after you walk the bno tree to
422 * determine exactly which extent you want (so you'll know the
423 * desired value for startblock when you call this routine).
426 get_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
427 xfs_extlen_t blockcount
)
429 extent_tree_node_t
*ext
, *prev
, *top
;
430 xfs_agblock_t tmp_startblock
;
431 xfs_extlen_t tmp_blockcount
;
432 extent_state_t tmp_state
;
435 ASSERT(extent_bcnt_ptrs
!= NULL
);
436 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
438 if ((ext
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
439 blockcount
)) == NULL
)
444 if (ext
->next
!= NULL
) {
446 * pull it off the list
448 while (ext
!= NULL
&& startblock
!= ext
->ex_startblock
) {
455 * this node is linked into the tree so we
456 * swap the core values so we can delete
457 * the next item on the list instead of
458 * the head of the list. This is because
459 * the rest of the tree undoubtedly has
460 * pointers to the piece of memory that
461 * is the head of the list so pulling
462 * the item out of the list and hence
463 * the avl tree would be a bad idea.
465 * (cheaper than the alternative, a tree
466 * delete of this node followed by a tree
467 * insert of the next node on the list).
469 tmp_startblock
= ext
->next
->ex_startblock
;
470 tmp_blockcount
= ext
->next
->ex_blockcount
;
471 tmp_state
= ext
->next
->ex_state
;
473 ext
->next
->ex_startblock
= ext
->ex_startblock
;
474 ext
->next
->ex_blockcount
= ext
->ex_blockcount
;
475 ext
->next
->ex_state
= ext
->ex_state
;
477 ext
->ex_startblock
= tmp_startblock
;
478 ext
->ex_blockcount
= tmp_blockcount
;
479 ext
->ex_state
= tmp_state
;
485 * now, a simple list deletion
487 prev
->next
= ext
->next
;
491 * no list, just one node. simply delete
493 avl_delete(extent_bcnt_ptrs
[agno
], &ext
->avl_node
);
496 ASSERT(ext
->ex_startblock
== startblock
);
497 ASSERT(ext
->ex_blockcount
== blockcount
);
502 avl_ext_start(avlnode_t
*node
)
505 ((extent_tree_node_t
*) node
)->ex_startblock
);
509 avl_ext_end(avlnode_t
*node
)
512 ((extent_tree_node_t
*) node
)->ex_startblock
+
513 ((extent_tree_node_t
*) node
)->ex_blockcount
));
517 * convert size to an address for the AVL tree code -- the bigger the size,
518 * the lower the address so the biggest extent will be first in the tree
521 avl_ext_bcnt_start(avlnode_t
*node
)
524 return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
525 node)->ex_blockcount)));
527 return((uintptr_t) ((extent_tree_node_t
*)node
)->ex_blockcount
);
531 avl_ext_bcnt_end(avlnode_t
*node
)
534 return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
535 node)->ex_blockcount)));
537 return((uintptr_t) ((extent_tree_node_t
*)node
)->ex_blockcount
);
540 avlops_t avl_extent_bcnt_tree_ops
= {
545 avlops_t avl_extent_tree_ops
= {
551 * for real-time extents -- have to dup code since realtime extent
552 * startblocks can be 64-bit values.
554 static rt_extent_tree_node_t
*
555 mk_rt_extent_tree_nodes(xfs_rtblock_t new_startblock
,
556 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
558 rt_extent_tree_node_t
*new;
560 new = malloc(sizeof(*new));
562 do_error(_("couldn't allocate new extent descriptor.\n"));
564 new->avl_node
.avl_nextino
= NULL
;
565 new->rt_startblock
= new_startblock
;
566 new->rt_blockcount
= new_blockcount
;
567 new->rt_state
= new_state
;
573 release_rt_extent_tree_node(rt_extent_tree_node_t
*node
)
579 release_rt_extent_tree()
581 extent_tree_node_t
*ext
;
582 extent_tree_node_t
*tmp
;
583 extent_tree_node_t
*lext
;
584 extent_tree_node_t
*ltmp
;
585 avl64tree_desc_t
*tree
;
587 tree
= rt_extent_tree_ptr
;
589 if (tree
->avl_firstino
== NULL
)
592 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
594 while (ext
!= NULL
) {
595 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
596 release_rt_extent_tree_node(ext
);
600 tree
->avl_root
= tree
->avl_firstino
= NULL
;
607 * don't need release functions for realtime tree teardown
608 * since we only have one tree, not one per AG
612 free_rt_dup_extent_tree(xfs_mount_t
*mp
)
614 ASSERT(mp
->m_sb
.sb_rblocks
!= 0);
615 free(rt_ext_tree_ptr
);
616 rt_ext_tree_ptr
= NULL
;
620 * add a duplicate real-time extent
623 add_rt_dup_extent(xfs_rtblock_t startblock
, xfs_extlen_t blockcount
)
625 rt_extent_tree_node_t
*first
, *last
, *ext
, *next_ext
;
626 xfs_rtblock_t new_startblock
;
627 xfs_extlen_t new_blockcount
;
629 pthread_mutex_lock(&rt_ext_tree_lock
);
630 avl64_findranges(rt_ext_tree_ptr
, startblock
- 1,
631 startblock
+ blockcount
+ 1,
632 (avl64node_t
**) &first
, (avl64node_t
**) &last
);
634 * find adjacent and overlapping extent blocks
636 if (first
== NULL
&& last
== NULL
) {
637 /* nothing, just make and insert new extent */
639 ext
= mk_rt_extent_tree_nodes(startblock
,
640 blockcount
, XR_E_MULT
);
642 if (avl64_insert(rt_ext_tree_ptr
,
643 (avl64node_t
*) ext
) == NULL
) {
644 do_error(_("duplicate extent range\n"));
647 pthread_mutex_unlock(&rt_ext_tree_lock
);
651 ASSERT(first
!= NULL
&& last
!= NULL
);
654 * find the new composite range, delete old extent nodes
657 new_startblock
= startblock
;
658 new_blockcount
= blockcount
;
661 ext
!= (rt_extent_tree_node_t
*) last
->avl_node
.avl_nextino
;
664 * preserve the next inorder node
666 next_ext
= (rt_extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
668 * just bail if the new extent is contained within an old one
670 if (ext
->rt_startblock
<= startblock
&&
671 ext
->rt_blockcount
>= blockcount
) {
672 pthread_mutex_unlock(&rt_ext_tree_lock
);
676 * now check for overlaps and adjacent extents
678 if (ext
->rt_startblock
+ ext
->rt_blockcount
>= startblock
679 || ext
->rt_startblock
<= startblock
+ blockcount
) {
681 if (ext
->rt_startblock
< new_startblock
)
682 new_startblock
= ext
->rt_startblock
;
684 if (ext
->rt_startblock
+ ext
->rt_blockcount
>
685 new_startblock
+ new_blockcount
)
686 new_blockcount
= ext
->rt_startblock
+
690 avl64_delete(rt_ext_tree_ptr
, (avl64node_t
*) ext
);
695 ext
= mk_rt_extent_tree_nodes(new_startblock
,
696 new_blockcount
, XR_E_MULT
);
698 if (avl64_insert(rt_ext_tree_ptr
, (avl64node_t
*) ext
) == NULL
) {
699 do_error(_("duplicate extent range\n"));
702 pthread_mutex_unlock(&rt_ext_tree_lock
);
707 * returns 1 if block is a dup, 0 if not
711 search_rt_dup_extent(xfs_mount_t
*mp
, xfs_rtblock_t bno
)
715 pthread_mutex_lock(&rt_ext_tree_lock
);
716 if (avl64_findrange(rt_ext_tree_ptr
, bno
) != NULL
)
720 pthread_mutex_unlock(&rt_ext_tree_lock
);
725 avl64_rt_ext_start(avl64node_t
*node
)
727 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
);
731 avl64_ext_end(avl64node_t
*node
)
733 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
+
734 ((rt_extent_tree_node_t
*) node
)->rt_blockcount
);
737 avl64ops_t avl64_extent_tree_ops
= {
743 incore_ext_init(xfs_mount_t
*mp
)
746 xfs_agnumber_t agcount
= mp
->m_sb
.sb_agcount
;
748 pthread_mutex_init(&rt_ext_tree_lock
, NULL
);
750 dup_extent_trees
= calloc(agcount
, sizeof(struct btree_root
*));
751 if (!dup_extent_trees
)
752 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
754 dup_extent_tree_locks
= calloc(agcount
, sizeof(pthread_mutex_t
));
755 if (!dup_extent_tree_locks
)
756 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
758 if ((extent_bno_ptrs
= malloc(agcount
*
759 sizeof(avltree_desc_t
*))) == NULL
)
761 _("couldn't malloc free by-bno extent tree descriptor table\n"));
763 if ((extent_bcnt_ptrs
= malloc(agcount
*
764 sizeof(avltree_desc_t
*))) == NULL
)
766 _("couldn't malloc free by-bcnt extent tree descriptor table\n"));
768 for (i
= 0; i
< agcount
; i
++) {
769 if ((extent_bno_ptrs
[i
] =
770 malloc(sizeof(avltree_desc_t
))) == NULL
)
772 _("couldn't malloc bno extent tree descriptor\n"));
773 if ((extent_bcnt_ptrs
[i
] =
774 malloc(sizeof(avltree_desc_t
))) == NULL
)
776 _("couldn't malloc bcnt extent tree descriptor\n"));
779 for (i
= 0; i
< agcount
; i
++) {
780 btree_init(&dup_extent_trees
[i
]);
781 pthread_mutex_init(&dup_extent_tree_locks
[i
], NULL
);
782 avl_init_tree(extent_bno_ptrs
[i
], &avl_extent_tree_ops
);
783 avl_init_tree(extent_bcnt_ptrs
[i
], &avl_extent_bcnt_tree_ops
);
786 if ((rt_ext_tree_ptr
= malloc(sizeof(avl64tree_desc_t
))) == NULL
)
787 do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
789 avl64_init_tree(rt_ext_tree_ptr
, &avl64_extent_tree_ops
);
793 * this routine actually frees all the memory used to track per-AG trees
796 incore_ext_teardown(xfs_mount_t
*mp
)
800 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
801 btree_destroy(dup_extent_trees
[i
]);
802 free(extent_bno_ptrs
[i
]);
803 free(extent_bcnt_ptrs
[i
]);
806 free(dup_extent_trees
);
807 free(extent_bcnt_ptrs
);
808 free(extent_bno_ptrs
);
810 dup_extent_trees
= NULL
;
811 extent_bcnt_ptrs
= NULL
;
812 extent_bno_ptrs
= NULL
;
816 count_extents(xfs_agnumber_t agno
, avltree_desc_t
*tree
, int whichtree
)
818 extent_tree_node_t
*node
;
821 node
= (extent_tree_node_t
*) tree
->avl_firstino
;
823 while (node
!= NULL
) {
826 node
= findnext_bcnt_extent(agno
, node
);
828 node
= findnext_bno_extent(node
);
835 count_bno_extents_blocks(xfs_agnumber_t agno
, uint
*numblocks
)
838 extent_tree_node_t
*node
;
841 ASSERT(agno
< glob_agcount
);
845 node
= (extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
;
847 while (node
!= NULL
) {
848 nblocks
+= node
->ex_blockcount
;
850 node
= findnext_bno_extent(node
);
853 *numblocks
= nblocks
;
858 count_bno_extents(xfs_agnumber_t agno
)
860 ASSERT(agno
< glob_agcount
);
861 return(count_extents(agno
, extent_bno_ptrs
[agno
], 0));
865 count_bcnt_extents(xfs_agnumber_t agno
)
867 ASSERT(agno
< glob_agcount
);
868 return(count_extents(agno
, extent_bcnt_ptrs
[agno
], 1));