/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
22 #include "err_protos.h"
33 # define dbg_printf(f, a...) do {printf(f, ## a); fflush(stdout); } while (0)
35 # define dbg_printf(f, a...)
38 /* per-AG rmap object anchor */
40 struct xfs_slab
*ar_rmaps
; /* rmap observations, p4 */
41 struct xfs_slab
*ar_raw_rmaps
; /* unmerged rmaps */
42 int ar_flcount
; /* agfl entries from leftover */
43 /* agbt allocations */
44 struct xfs_rmap_irec ar_last_rmap
; /* last rmap seen */
/* Array of per-AG rmap anchors, indexed by AG number; set up in init_rmaps. */
static struct xfs_ag_rmap	*ag_rmaps;
/* Set when the on-disk rmapbt is known-bad, to suppress cross-checking. */
static bool			rmapbt_suspect;
51 * Compare rmap observations for array sorting.
58 const struct xfs_rmap_irec
*pa
;
59 const struct xfs_rmap_irec
*pb
;
64 oa
= xfs_rmap_irec_offset_pack(pa
);
65 ob
= xfs_rmap_irec_offset_pack(pb
);
67 if (pa
->rm_startblock
< pb
->rm_startblock
)
69 else if (pa
->rm_startblock
> pb
->rm_startblock
)
71 else if (pa
->rm_owner
< pb
->rm_owner
)
73 else if (pa
->rm_owner
> pb
->rm_owner
)
84 * Returns true if we must reconstruct either the reference count or reverse
91 return xfs_sb_version_hasrmapbt(&mp
->m_sb
);
95 * Initialize per-AG reverse map data.
104 if (!needs_rmap_work(mp
))
107 ag_rmaps
= calloc(mp
->m_sb
.sb_agcount
, sizeof(struct xfs_ag_rmap
));
109 do_error(_("couldn't allocate per-AG reverse map roots\n"));
111 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
112 error
= init_slab(&ag_rmaps
[i
].ar_rmaps
,
113 sizeof(struct xfs_rmap_irec
));
116 _("Insufficient memory while allocating reverse mapping slabs."));
117 error
= init_slab(&ag_rmaps
[i
].ar_raw_rmaps
,
118 sizeof(struct xfs_rmap_irec
));
121 _("Insufficient memory while allocating raw metadata reverse mapping slabs."));
122 ag_rmaps
[i
].ar_last_rmap
.rm_owner
= XFS_RMAP_OWN_UNKNOWN
;
127 * Free the per-AG reverse-mapping data.
131 struct xfs_mount
*mp
)
135 if (!needs_rmap_work(mp
))
138 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
139 free_slab(&ag_rmaps
[i
].ar_rmaps
);
140 free_slab(&ag_rmaps
[i
].ar_raw_rmaps
);
147 * Decide if two reverse-mapping records can be merged.
151 struct xfs_rmap_irec
*r1
,
152 struct xfs_rmap_irec
*r2
)
154 if (r1
->rm_owner
!= r2
->rm_owner
)
156 if (r1
->rm_startblock
+ r1
->rm_blockcount
!= r2
->rm_startblock
)
158 if ((unsigned long long)r1
->rm_blockcount
+ r2
->rm_blockcount
>
161 if (XFS_RMAP_NON_INODE_OWNER(r2
->rm_owner
))
163 /* must be an inode owner below here */
164 if (r1
->rm_flags
!= r2
->rm_flags
)
166 if (r1
->rm_flags
& XFS_RMAP_BMBT_BLOCK
)
168 return r1
->rm_offset
+ r1
->rm_blockcount
== r2
->rm_offset
;
172 * Add an observation about a block mapping in an inode's data or attribute
173 * fork for later btree reconstruction.
177 struct xfs_mount
*mp
,
180 struct xfs_bmbt_irec
*irec
)
182 struct xfs_rmap_irec rmap
;
185 struct xfs_rmap_irec
*last_rmap
;
188 if (!needs_rmap_work(mp
))
191 agno
= XFS_FSB_TO_AGNO(mp
, irec
->br_startblock
);
192 agbno
= XFS_FSB_TO_AGBNO(mp
, irec
->br_startblock
);
193 ASSERT(agno
!= NULLAGNUMBER
);
194 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
195 ASSERT(agbno
+ irec
->br_blockcount
<= mp
->m_sb
.sb_agblocks
);
196 ASSERT(ino
!= NULLFSINO
);
197 ASSERT(whichfork
== XFS_DATA_FORK
|| whichfork
== XFS_ATTR_FORK
);
200 rmap
.rm_offset
= irec
->br_startoff
;
202 if (whichfork
== XFS_ATTR_FORK
)
203 rmap
.rm_flags
|= XFS_RMAP_ATTR_FORK
;
204 rmap
.rm_startblock
= agbno
;
205 rmap
.rm_blockcount
= irec
->br_blockcount
;
206 if (irec
->br_state
== XFS_EXT_UNWRITTEN
)
207 rmap
.rm_flags
|= XFS_RMAP_UNWRITTEN
;
208 last_rmap
= &ag_rmaps
[agno
].ar_last_rmap
;
209 if (last_rmap
->rm_owner
== XFS_RMAP_OWN_UNKNOWN
)
211 else if (mergeable_rmaps(last_rmap
, &rmap
))
212 last_rmap
->rm_blockcount
+= rmap
.rm_blockcount
;
214 error
= slab_add(ag_rmaps
[agno
].ar_rmaps
, last_rmap
);
223 /* Finish collecting inode data/attr fork rmaps. */
225 finish_collecting_fork_rmaps(
226 struct xfs_mount
*mp
,
229 if (!needs_rmap_work(mp
) ||
230 ag_rmaps
[agno
].ar_last_rmap
.rm_owner
== XFS_RMAP_OWN_UNKNOWN
)
232 return slab_add(ag_rmaps
[agno
].ar_rmaps
, &ag_rmaps
[agno
].ar_last_rmap
);
235 /* add a raw rmap; these will be merged later */
238 struct xfs_mount
*mp
,
246 struct xfs_rmap_irec rmap
;
249 rmap
.rm_owner
= owner
;
253 rmap
.rm_flags
|= XFS_RMAP_ATTR_FORK
;
255 rmap
.rm_flags
|= XFS_RMAP_BMBT_BLOCK
;
256 rmap
.rm_startblock
= agbno
;
257 rmap
.rm_blockcount
= len
;
258 return slab_add(ag_rmaps
[agno
].ar_raw_rmaps
, &rmap
);
262 * Add a reverse mapping for an inode fork's block mapping btree block.
266 struct xfs_mount
*mp
,
274 if (!needs_rmap_work(mp
))
277 agno
= XFS_FSB_TO_AGNO(mp
, fsbno
);
278 agbno
= XFS_FSB_TO_AGBNO(mp
, fsbno
);
279 ASSERT(agno
!= NULLAGNUMBER
);
280 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
281 ASSERT(agbno
+ 1 <= mp
->m_sb
.sb_agblocks
);
283 return __add_raw_rmap(mp
, agno
, agbno
, 1, ino
,
284 whichfork
== XFS_ATTR_FORK
, true);
288 * Add a reverse mapping for a per-AG fixed metadata extent.
292 struct xfs_mount
*mp
,
298 if (!needs_rmap_work(mp
))
301 ASSERT(agno
!= NULLAGNUMBER
);
302 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
303 ASSERT(agbno
+ len
<= mp
->m_sb
.sb_agblocks
);
305 return __add_raw_rmap(mp
, agno
, agbno
, len
, owner
, false, false);
309 * Merge adjacent raw rmaps and add them to the main rmap list.
313 struct xfs_mount
*mp
,
316 struct xfs_slab_cursor
*cur
= NULL
;
317 struct xfs_rmap_irec
*prev
, *rec
;
321 old_sz
= slab_count(ag_rmaps
[agno
].ar_rmaps
);
322 if (slab_count(ag_rmaps
[agno
].ar_raw_rmaps
) == 0)
324 qsort_slab(ag_rmaps
[agno
].ar_raw_rmaps
, rmap_compare
);
325 error
= init_slab_cursor(ag_rmaps
[agno
].ar_raw_rmaps
, rmap_compare
,
330 prev
= pop_slab_cursor(cur
);
331 rec
= pop_slab_cursor(cur
);
333 if (mergeable_rmaps(prev
, rec
)) {
334 prev
->rm_blockcount
+= rec
->rm_blockcount
;
335 rec
= pop_slab_cursor(cur
);
338 error
= slab_add(ag_rmaps
[agno
].ar_rmaps
, prev
);
342 rec
= pop_slab_cursor(cur
);
345 error
= slab_add(ag_rmaps
[agno
].ar_rmaps
, prev
);
349 free_slab(&ag_rmaps
[agno
].ar_raw_rmaps
);
350 error
= init_slab(&ag_rmaps
[agno
].ar_raw_rmaps
,
351 sizeof(struct xfs_rmap_irec
));
354 _("Insufficient memory while allocating raw metadata reverse mapping slabs."));
357 qsort_slab(ag_rmaps
[agno
].ar_rmaps
, rmap_compare
);
359 free_slab_cursor(&cur
);
370 for (n
= 0; n
< sizeof(mask
) * NBBY
&& (mask
& 1); n
++, mask
>>= 1)
386 for (n
= 0; n
< sizeof(mask
) * NBBY
; n
++, mask
>>= 1)
394 * Add an allocation group's fixed metadata to the rmap list. This includes
395 * sb/agi/agf/agfl headers, inode chunks, and the log.
398 add_fixed_ag_rmap_data(
399 struct xfs_mount
*mp
,
404 ino_tree_node_t
*ino_rec
;
410 if (!needs_rmap_work(mp
))
413 /* sb/agi/agf/agfl headers */
414 error
= add_ag_rmap(mp
, agno
, 0, XFS_BNO_BLOCK(mp
),
420 ino_rec
= findfirst_inode_rec(agno
);
421 for (; ino_rec
!= NULL
; ino_rec
= next_ino_rec(ino_rec
)) {
422 if (xfs_sb_version_hassparseinodes(&mp
->m_sb
)) {
423 startidx
= find_first_zero_bit(ino_rec
->ir_sparse
);
424 nr
= XFS_INODES_PER_CHUNK
- popcnt(ino_rec
->ir_sparse
);
427 nr
= XFS_INODES_PER_CHUNK
;
429 nr
/= mp
->m_sb
.sb_inopblock
;
432 agino
= ino_rec
->ino_startnum
+ startidx
;
433 agbno
= XFS_AGINO_TO_AGBNO(mp
, agino
);
434 if (XFS_AGINO_TO_OFFSET(mp
, agino
) == 0) {
435 error
= add_ag_rmap(mp
, agno
, agbno
, nr
,
436 XFS_RMAP_OWN_INODES
);
443 fsbno
= mp
->m_sb
.sb_logstart
;
444 if (fsbno
&& XFS_FSB_TO_AGNO(mp
, fsbno
) == agno
) {
445 agbno
= XFS_FSB_TO_AGBNO(mp
, mp
->m_sb
.sb_logstart
);
446 error
= add_ag_rmap(mp
, agno
, agbno
, mp
->m_sb
.sb_logblocks
,
456 * Copy the per-AG btree reverse-mapping data into the rmapbt.
458 * At rmapbt reconstruction time, the rmapbt will be populated _only_ with
459 * rmaps for file extents, inode chunks, AG headers, and bmbt blocks. While
460 * building the AG btrees we can record all the blocks allocated for each
461 * btree, but we cannot resolve the conflict between the fact that one has to
462 * finish allocating the space for the rmapbt before building the bnobt and the
463 * fact that allocating blocks for the bnobt requires adding rmapbt entries.
464 * Therefore we record in-core the rmaps for each btree and here use the
465 * libxfs rmap functions to finish building the rmap btree.
467 * During AGF/AGFL reconstruction in phase 5, rmaps for the AG btrees are
468 * recorded in memory. The rmapbt has not been set up yet, so we need to be
469 * able to "expand" the AGFL without updating the rmapbt. After we've written
470 * out the new AGF header the new rmapbt is available, so this function reads
471 * each AGFL to generate rmap entries. These entries are merged with the AG
472 * btree rmap entries, and then we use libxfs' rmap functions to add them to
473 * the rmapbt, after which it is fully regenerated.
476 store_ag_btree_rmap_data(
477 struct xfs_mount
*mp
,
480 struct xfs_slab_cursor
*rm_cur
;
481 struct xfs_rmap_irec
*rm_rec
= NULL
;
482 struct xfs_buf
*agbp
= NULL
;
483 struct xfs_buf
*agflbp
= NULL
;
484 struct xfs_trans
*tp
;
485 struct xfs_trans_res tres
= {0};
486 __be32
*agfl_bno
, *b
;
488 struct xfs_owner_info oinfo
;
490 if (!xfs_sb_version_hasrmapbt(&mp
->m_sb
))
493 /* Release the ar_rmaps; they were put into the rmapbt during p5. */
494 free_slab(&ag_rmaps
[agno
].ar_rmaps
);
495 error
= init_slab(&ag_rmaps
[agno
].ar_rmaps
,
496 sizeof(struct xfs_rmap_irec
));
500 /* Add the AGFL blocks to the rmap list */
501 error
= libxfs_trans_read_buf(
502 mp
, NULL
, mp
->m_ddev_targp
,
503 XFS_AG_DADDR(mp
, agno
, XFS_AGFL_DADDR(mp
)),
504 XFS_FSS_TO_BB(mp
, 1), 0, &agflbp
, &xfs_agfl_buf_ops
);
508 agfl_bno
= XFS_BUF_TO_AGFL_BNO(mp
, agflbp
);
509 agfl_bno
+= ag_rmaps
[agno
].ar_flcount
;
511 while (*b
!= NULLAGBLOCK
&& b
- agfl_bno
<= XFS_AGFL_SIZE(mp
)) {
512 error
= add_ag_rmap(mp
, agno
, be32_to_cpu(*b
), 1,
518 libxfs_putbuf(agflbp
);
521 /* Merge all the raw rmaps into the main list */
522 error
= fold_raw_rmaps(mp
, agno
);
526 /* Create cursors to refcount structures */
527 error
= init_slab_cursor(ag_rmaps
[agno
].ar_rmaps
, rmap_compare
,
532 /* Insert rmaps into the btree one at a time */
533 rm_rec
= pop_slab_cursor(rm_cur
);
535 error
= -libxfs_trans_alloc(mp
, &tres
, 16, 0, 0, &tp
);
539 error
= libxfs_alloc_read_agf(mp
, tp
, agno
, 0, &agbp
);
543 ASSERT(XFS_RMAP_NON_INODE_OWNER(rm_rec
->rm_owner
));
544 libxfs_rmap_ag_owner(&oinfo
, rm_rec
->rm_owner
);
545 error
= libxfs_rmap_alloc(tp
, agbp
, agno
, rm_rec
->rm_startblock
,
546 rm_rec
->rm_blockcount
, &oinfo
);
550 error
= -libxfs_trans_commit(tp
);
554 fix_freelist(mp
, agno
, false);
556 rm_rec
= pop_slab_cursor(rm_cur
);
559 free_slab_cursor(&rm_cur
);
563 libxfs_trans_cancel(tp
);
565 free_slab_cursor(&rm_cur
);
568 libxfs_putbuf(agflbp
);
569 printf("FAIL err %d\n", error
);
#ifdef RMAP_DEBUG
/*
 * Print a single rmap record for debugging.
 * NOTE(review): rm_owner is cast to unsigned long long but printed with
 * %lld -- format/argument signedness mismatch; confirm intent.
 */
static void
dump_rmap(
	const char		*msg,
	xfs_agnumber_t		agno,
	struct xfs_rmap_irec	*rmap)
{
	printf("%s: %p agno=%u pblk=%llu own=%lld lblk=%llu len=%u flags=0x%x\n",
		msg, rmap,
		(unsigned int)agno,
		(unsigned long long)rmap->rm_startblock,
		(unsigned long long)rmap->rm_owner,
		(unsigned long long)rmap->rm_offset,
		(unsigned int)rmap->rm_blockcount,
		(unsigned int)rmap->rm_flags);
}
#else
# define dump_rmap(m, a, r)
#endif
594 * Return the number of rmap objects for an AG.
598 struct xfs_mount
*mp
,
601 return slab_count(ag_rmaps
[agno
].ar_rmaps
);
605 * Return a slab cursor that will return rmap objects in order.
610 struct xfs_slab_cursor
**cur
)
612 return init_slab_cursor(ag_rmaps
[agno
].ar_rmaps
, rmap_compare
, cur
);
616 * Disable the refcount btree check.
619 rmap_avoid_check(void)
621 rmapbt_suspect
= true;
624 /* Look for an rmap in the rmapbt that matches a given rmap. */
627 struct xfs_btree_cur
*bt_cur
,
628 struct xfs_rmap_irec
*rm_rec
,
629 struct xfs_rmap_irec
*tmp
,
634 /* Use the regular btree retrieval routine. */
635 error
= -libxfs_rmap_lookup_le(bt_cur
, rm_rec
->rm_startblock
,
636 rm_rec
->rm_blockcount
,
637 rm_rec
->rm_owner
, rm_rec
->rm_offset
,
638 rm_rec
->rm_flags
, have
);
643 return -libxfs_rmap_get_rec(bt_cur
, tmp
, have
);
646 /* Does the btree rmap cover the observed rmap? */
647 #define NEXTP(x) ((x)->rm_startblock + (x)->rm_blockcount)
648 #define NEXTL(x) ((x)->rm_offset + (x)->rm_blockcount)
651 struct xfs_rmap_irec
*observed
,
652 struct xfs_rmap_irec
*btree
)
654 /* Can't have mismatches in the flags or the owner. */
655 if (btree
->rm_flags
!= observed
->rm_flags
||
656 btree
->rm_owner
!= observed
->rm_owner
)
660 * Btree record can't physically start after the observed
661 * record, nor can it end before the observed record.
663 if (btree
->rm_startblock
> observed
->rm_startblock
||
664 NEXTP(btree
) < NEXTP(observed
))
667 /* If this is metadata or bmbt, we're done. */
668 if (XFS_RMAP_NON_INODE_OWNER(observed
->rm_owner
) ||
669 (observed
->rm_flags
& XFS_RMAP_BMBT_BLOCK
))
672 * Btree record can't logically start after the observed
673 * record, nor can it end before the observed record.
675 if (btree
->rm_offset
> observed
->rm_offset
||
676 NEXTL(btree
) < NEXTL(observed
))
685 * Compare the observed reverse mappings against what's in the ag btree.
689 struct xfs_mount
*mp
,
692 struct xfs_slab_cursor
*rm_cur
;
693 struct xfs_btree_cur
*bt_cur
= NULL
;
696 struct xfs_buf
*agbp
= NULL
;
697 struct xfs_rmap_irec
*rm_rec
;
698 struct xfs_rmap_irec tmp
;
699 struct xfs_perag
*pag
; /* per allocation group data */
701 if (!xfs_sb_version_hasrmapbt(&mp
->m_sb
))
703 if (rmapbt_suspect
) {
704 if (no_modify
&& agno
== 0)
705 do_warn(_("would rebuild corrupt rmap btrees.\n"));
709 /* Create cursors to refcount structures */
710 error
= init_rmap_cursor(agno
, &rm_cur
);
714 error
= -libxfs_alloc_read_agf(mp
, NULL
, agno
, 0, &agbp
);
718 /* Leave the per-ag data "uninitialized" since we rewrite it later */
719 pag
= xfs_perag_get(mp
, agno
);
723 bt_cur
= libxfs_rmapbt_init_cursor(mp
, NULL
, agbp
, agno
);
729 rm_rec
= pop_slab_cursor(rm_cur
);
731 error
= lookup_rmap(bt_cur
, rm_rec
, &tmp
, &have
);
736 _("Missing reverse-mapping record for (%u/%u) %slen %u owner %"PRId64
" \
737 %s%soff %"PRIu64
"\n"),
738 agno
, rm_rec
->rm_startblock
,
739 (rm_rec
->rm_flags
& XFS_RMAP_UNWRITTEN
) ?
740 _("unwritten ") : "",
741 rm_rec
->rm_blockcount
,
743 (rm_rec
->rm_flags
& XFS_RMAP_ATTR_FORK
) ?
745 (rm_rec
->rm_flags
& XFS_RMAP_BMBT_BLOCK
) ?
751 /* Compare each refcount observation against the btree's */
752 if (!is_good_rmap(rm_rec
, &tmp
)) {
754 _("Incorrect reverse-mapping: saw (%u/%u) %slen %u owner %"PRId64
" %s%soff \
755 %"PRIu64
"; should be (%u/%u) %slen %u owner %"PRId64
" %s%soff %"PRIu64
"\n"),
756 agno
, tmp
.rm_startblock
,
757 (tmp
.rm_flags
& XFS_RMAP_UNWRITTEN
) ?
758 _("unwritten ") : "",
761 (tmp
.rm_flags
& XFS_RMAP_ATTR_FORK
) ?
763 (tmp
.rm_flags
& XFS_RMAP_BMBT_BLOCK
) ?
766 agno
, rm_rec
->rm_startblock
,
767 (rm_rec
->rm_flags
& XFS_RMAP_UNWRITTEN
) ?
768 _("unwritten ") : "",
769 rm_rec
->rm_blockcount
,
771 (rm_rec
->rm_flags
& XFS_RMAP_ATTR_FORK
) ?
773 (rm_rec
->rm_flags
& XFS_RMAP_BMBT_BLOCK
) ?
779 rm_rec
= pop_slab_cursor(rm_cur
);
784 libxfs_btree_del_cursor(bt_cur
, XFS_BTREE_NOERROR
);
787 free_slab_cursor(&rm_cur
);
792 * Compare the key fields of two rmap records -- positive if key1 > key2,
793 * negative if key1 < key2, and zero if equal.
797 struct xfs_rmap_irec
*kp1
,
798 struct xfs_rmap_irec
*kp2
)
803 struct xfs_rmap_irec tmp
;
806 tmp
.rm_flags
&= ~XFS_RMAP_REC_FLAGS
;
807 oa
= xfs_rmap_irec_offset_pack(&tmp
);
809 tmp
.rm_flags
&= ~XFS_RMAP_REC_FLAGS
;
810 ob
= xfs_rmap_irec_offset_pack(&tmp
);
812 d
= (__int64_t
)kp1
->rm_startblock
- kp2
->rm_startblock
;
816 if (kp1
->rm_owner
> kp2
->rm_owner
)
818 else if (kp2
->rm_owner
> kp1
->rm_owner
)
828 /* Compute the high key of an rmap record. */
830 rmap_high_key_from_rec(
831 struct xfs_rmap_irec
*rec
,
832 struct xfs_rmap_irec
*key
)
836 adj
= rec
->rm_blockcount
- 1;
838 key
->rm_startblock
= rec
->rm_startblock
+ adj
;
839 key
->rm_owner
= rec
->rm_owner
;
840 key
->rm_offset
= rec
->rm_offset
;
841 key
->rm_flags
= rec
->rm_flags
& XFS_RMAP_KEY_FLAGS
;
842 if (XFS_RMAP_NON_INODE_OWNER(rec
->rm_owner
) ||
843 (rec
->rm_flags
& XFS_RMAP_BMBT_BLOCK
))
845 key
->rm_offset
+= adj
;
849 * Regenerate the AGFL so that we don't run out of it while rebuilding the
850 * rmap btree. If skip_rmapbt is true, don't update the rmapbt (most probably
851 * because we're updating the rmapbt).
855 struct xfs_mount
*mp
,
859 xfs_alloc_arg_t args
;
861 struct xfs_trans_res tres
= {0};
865 memset(&args
, 0, sizeof(args
));
869 args
.pag
= xfs_perag_get(mp
, agno
);
870 error
= -libxfs_trans_alloc(mp
, &tres
,
871 libxfs_alloc_min_freelist(mp
, args
.pag
), 0, 0, &tp
);
873 do_error(_("failed to fix AGFL on AG %d, error %d\n"),
878 * Prior to rmapbt, all we had to do to fix the freelist is "expand"
879 * the fresh AGFL header from empty to full. That hasn't changed. For
880 * rmapbt, however, things change a bit.
882 * When we're stuffing the rmapbt with the AG btree rmaps the tree can
883 * expand, so we need to keep the AGFL well-stocked for the expansion.
884 * However, this expansion can cause the bnobt/cntbt to shrink, which
885 * can make the AGFL eligible for shrinking. Shrinking involves
886 * freeing rmapbt entries, but since we haven't finished loading the
887 * rmapbt with the btree rmaps it's possible for the remove operation
888 * to fail. The AGFL block is large enough at this point to absorb any
889 * blocks freed from the bnobt/cntbt, so we can disable shrinking.
891 * During the initial AGFL regeneration during AGF generation in phase5
892 * we must also disable rmapbt modifications because the AGF that
893 * libxfs reads does not yet point to the new rmapbt. These initial
894 * AGFL entries are added just prior to adding the AG btree block rmaps
895 * to the rmapbt. It's ok to pass NOSHRINK here too, since the AGFL is
896 * empty and cannot shrink.
898 flags
= XFS_ALLOC_FLAG_NOSHRINK
;
900 flags
|= XFS_ALLOC_FLAG_NORMAP
;
901 error
= libxfs_alloc_fix_freelist(&args
, flags
);
902 xfs_perag_put(args
.pag
);
904 do_error(_("failed to fix AGFL on AG %d, error %d\n"),
907 libxfs_trans_commit(tp
);
911 * Remember how many AGFL entries came from excess AG btree allocations and
912 * therefore already have rmap entries.
915 rmap_store_agflcount(
916 struct xfs_mount
*mp
,
920 if (!needs_rmap_work(mp
))
923 ag_rmaps
[agno
].ar_flcount
= count
;