/* repair/phase5.c — from thirdparty xfsprogs-dev, release v6.8.0 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "libxfs.h"
8 #include "libfrog/bitmap.h"
9 #include "avl.h"
10 #include "globals.h"
11 #include "agheader.h"
12 #include "incore.h"
13 #include "protos.h"
14 #include "err_protos.h"
15 #include "dinode.h"
16 #include "rt.h"
17 #include "versions.h"
18 #include "threads.h"
19 #include "progress.h"
20 #include "slab.h"
21 #include "rmap.h"
22 #include "bulkload.h"
23 #include "agbtree.h"
24
25 static uint64_t *sb_icount_ag; /* allocated inodes per ag */
26 static uint64_t *sb_ifree_ag; /* free inodes per ag */
27 static uint64_t *sb_fdblocks_ag; /* free data blocks per ag */
28
29 static int
30 mk_incore_fstree(
31 struct xfs_mount *mp,
32 xfs_agnumber_t agno,
33 unsigned int *num_freeblocks)
34 {
35 int in_extent;
36 int num_extents;
37 xfs_agblock_t extent_start;
38 xfs_extlen_t extent_len;
39 xfs_agblock_t agbno;
40 xfs_agblock_t ag_end;
41 uint free_blocks;
42 xfs_extlen_t blen;
43 int bstate;
44
45 *num_freeblocks = 0;
46
47 /*
48 * scan the bitmap for the ag looking for continuous
49 * extents of free blocks. At this point, we know
50 * that blocks in the bitmap are either set to an
51 * "in use" state or set to unknown (0) since the
52 * bmaps were zero'ed in phase 4 and only blocks
53 * being used by inodes, inode bmaps, ag headers,
54 * and the files themselves were put into the bitmap.
55 *
56 */
57 ASSERT(agno < mp->m_sb.sb_agcount);
58
59 extent_start = extent_len = 0;
60 in_extent = 0;
61 num_extents = free_blocks = 0;
62
63 if (agno < mp->m_sb.sb_agcount - 1)
64 ag_end = mp->m_sb.sb_agblocks;
65 else
66 ag_end = mp->m_sb.sb_dblocks -
67 (xfs_rfsblock_t)mp->m_sb.sb_agblocks *
68 (mp->m_sb.sb_agcount - 1);
69
70 /*
71 * ok, now find the number of extents, keep track of the
72 * largest extent.
73 */
74 for (agbno = 0; agbno < ag_end; agbno += blen) {
75 bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
76 if (bstate < XR_E_INUSE) {
77 free_blocks += blen;
78 if (in_extent == 0) {
79 /*
80 * found the start of a free extent
81 */
82 in_extent = 1;
83 num_extents++;
84 extent_start = agbno;
85 extent_len = blen;
86 } else {
87 extent_len += blen;
88 }
89 } else {
90 if (in_extent) {
91 /*
92 * free extent ends here, add extent to the
93 * 2 incore extent (avl-to-be-B+) trees
94 */
95 in_extent = 0;
96 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
97 fprintf(stderr, "adding extent %u [%u %u]\n",
98 agno, extent_start, extent_len);
99 #endif
100 add_bno_extent(agno, extent_start, extent_len);
101 add_bcnt_extent(agno, extent_start, extent_len);
102 *num_freeblocks += extent_len;
103 }
104 }
105 }
106 if (in_extent) {
107 /*
108 * free extent ends here
109 */
110 #if defined(XR_BLD_FREE_TRACE) && defined(XR_BLD_ADD_EXTENT)
111 fprintf(stderr, "adding extent %u [%u %u]\n",
112 agno, extent_start, extent_len);
113 #endif
114 add_bno_extent(agno, extent_start, extent_len);
115 add_bcnt_extent(agno, extent_start, extent_len);
116 *num_freeblocks += extent_len;
117 }
118
119 return(num_extents);
120 }
121
122 /*
123 * XXX: yet more code that can be shared with mkfs, growfs.
124 */
125 static void
126 build_agi(
127 struct xfs_mount *mp,
128 xfs_agnumber_t agno,
129 struct bt_rebuild *btr_ino,
130 struct bt_rebuild *btr_fino)
131 {
132 struct xfs_buf *agi_buf;
133 struct xfs_agi *agi;
134 int i;
135 int error;
136
137 error = -libxfs_buf_get(mp->m_dev,
138 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
139 mp->m_sb.sb_sectsize / BBSIZE, &agi_buf);
140 if (error)
141 do_error(_("Cannot grab AG %u AGI buffer, err=%d"),
142 agno, error);
143 agi_buf->b_ops = &xfs_agi_buf_ops;
144 agi = agi_buf->b_addr;
145 memset(agi, 0, mp->m_sb.sb_sectsize);
146
147 agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
148 agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
149 agi->agi_seqno = cpu_to_be32(agno);
150 if (agno < mp->m_sb.sb_agcount - 1)
151 agi->agi_length = cpu_to_be32(mp->m_sb.sb_agblocks);
152 else
153 agi->agi_length = cpu_to_be32(mp->m_sb.sb_dblocks -
154 (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
155 agi->agi_count = cpu_to_be32(btr_ino->count);
156 agi->agi_root = cpu_to_be32(btr_ino->newbt.afake.af_root);
157 agi->agi_level = cpu_to_be32(btr_ino->newbt.afake.af_levels);
158 agi->agi_freecount = cpu_to_be32(btr_ino->freecount);
159 agi->agi_newino = cpu_to_be32(btr_ino->first_agino);
160 agi->agi_dirino = cpu_to_be32(NULLAGINO);
161
162 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
163 agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);
164
165 if (xfs_has_crc(mp))
166 platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
167
168 if (xfs_has_finobt(mp)) {
169 agi->agi_free_root =
170 cpu_to_be32(btr_fino->newbt.afake.af_root);
171 agi->agi_free_level =
172 cpu_to_be32(btr_fino->newbt.afake.af_levels);
173 }
174
175 if (xfs_has_inobtcounts(mp)) {
176 agi->agi_iblocks = cpu_to_be32(btr_ino->newbt.afake.af_blocks);
177 agi->agi_fblocks = cpu_to_be32(btr_fino->newbt.afake.af_blocks);
178 }
179
180 libxfs_buf_mark_dirty(agi_buf);
181 libxfs_buf_relse(agi_buf);
182 }
183
184 /* Fill the AGFL with any leftover bnobt rebuilder blocks. */
185 static void
186 fill_agfl(
187 struct bt_rebuild *btr,
188 __be32 *agfl_bnos,
189 unsigned int *agfl_idx)
190 {
191 struct bulkload_resv *resv, *n;
192 struct xfs_mount *mp = btr->newbt.sc->mp;
193
194 for_each_bulkload_reservation(&btr->newbt, resv, n) {
195 xfs_agblock_t bno;
196
197 bno = resv->agbno + resv->used;
198 while (resv->used < resv->len &&
199 *agfl_idx < libxfs_agfl_size(mp)) {
200 agfl_bnos[(*agfl_idx)++] = cpu_to_be32(bno++);
201 resv->used++;
202 }
203 }
204 }
205
206 /*
207 * build both the agf and the agfl for an agno given both
208 * btree cursors.
209 *
210 * XXX: yet more common code that can be shared with mkfs/growfs.
211 */
/*
 * Build both the on-disk AGF and AGFL for @agno from the rebuilt btree
 * cursors.  The AGFL is populated with blocks left over from the bnobt,
 * cntbt and (optionally) rmapbt reservations, then the AGF counters are
 * set to match.  Ordering matters: the AGFL buffer is written and
 * released before the AGF, and fix_freelist() runs last to bring the
 * free list to its proper length.
 *
 * XXX: yet more common code that can be shared with mkfs/growfs.
 */
static void
build_agf_agfl(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct bt_rebuild	*btr_bno,
	struct bt_rebuild	*btr_cnt,
	struct bt_rebuild	*btr_rmap,
	struct bt_rebuild	*btr_refc,
	struct bitmap		*lost_blocks)
{
	struct extent_tree_node	*ext_ptr;
	struct xfs_buf		*agf_buf, *agfl_buf;
	unsigned int		agfl_idx;
	struct xfs_agfl		*agfl;
	struct xfs_agf		*agf;
	__be32			*freelist;
	int			error;

	error = -libxfs_buf_get(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			mp->m_sb.sb_sectsize / BBSIZE, &agf_buf);
	if (error)
		do_error(_("Cannot grab AG %u AGF buffer, err=%d"),
				agno, error);
	agf_buf->b_ops = &xfs_agf_buf_ops;
	agf = agf_buf->b_addr;
	memset(agf, 0, mp->m_sb.sb_sectsize);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "agf = %p, agf_buf->b_addr = %p\n",
		agf, agf_buf->b_addr);
#endif

	/*
	 * set up fixed part of agf
	 */
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);

	/* the last AG may be shorter than sb_agblocks */
	if (agno < mp->m_sb.sb_agcount - 1)
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);

	/* roots/levels of the rebuilt bno and cnt freespace btrees */
	agf->agf_roots[XFS_BTNUM_BNO] =
			cpu_to_be32(btr_bno->newbt.afake.af_root);
	agf->agf_levels[XFS_BTNUM_BNO] =
			cpu_to_be32(btr_bno->newbt.afake.af_levels);
	agf->agf_roots[XFS_BTNUM_CNT] =
			cpu_to_be32(btr_cnt->newbt.afake.af_root);
	agf->agf_levels[XFS_BTNUM_CNT] =
			cpu_to_be32(btr_cnt->newbt.afake.af_levels);
	agf->agf_freeblks = cpu_to_be32(btr_bno->freeblks);

	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAP] =
				cpu_to_be32(btr_rmap->newbt.afake.af_root);
		agf->agf_levels[XFS_BTNUM_RMAP] =
				cpu_to_be32(btr_rmap->newbt.afake.af_levels);
		agf->agf_rmap_blocks =
				cpu_to_be32(btr_rmap->newbt.afake.af_blocks);
	}

	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root =
				cpu_to_be32(btr_refc->newbt.afake.af_root);
		agf->agf_refcount_level =
				cpu_to_be32(btr_refc->newbt.afake.af_levels);
		agf->agf_refcount_blocks =
				cpu_to_be32(btr_refc->newbt.afake.af_blocks);
	}

	/*
	 * Count and record the number of btree blocks consumed if required.
	 */
	if (xfs_has_lazysbcount(mp)) {
		unsigned int blks;
		/*
		 * Don't count the root blocks as they are already
		 * accounted for.  (-2: one root each for bno and cnt;
		 * -1 more below for the rmapbt root.)
		 */
		blks = btr_bno->newbt.afake.af_blocks +
			btr_cnt->newbt.afake.af_blocks - 2;
		if (xfs_has_rmapbt(mp))
			blks += btr_rmap->newbt.afake.af_blocks - 1;
		agf->agf_btreeblks = cpu_to_be32(blks);
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "agf->agf_btreeblks = %u\n",
				be32_to_cpu(agf->agf_btreeblks));
#endif
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
			XFS_BTNUM_BNO,
			XFS_BTNUM_CNT);
#endif

	if (xfs_has_crc(mp))
		platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* initialise the AGFL, then fill it if there are blocks left over. */
	error = -libxfs_buf_get(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			mp->m_sb.sb_sectsize / BBSIZE, &agfl_buf);
	if (error)
		do_error(_("Cannot grab AG %u AGFL buffer, err=%d"),
				agno, error);
	agfl_buf->b_ops = &xfs_agfl_buf_ops;
	agfl = XFS_BUF_TO_AGFL(agfl_buf);

	/* setting to 0xff results in initialisation to NULLAGBLOCK */
	memset(agfl, 0xff, mp->m_sb.sb_sectsize);
	freelist = xfs_buf_to_agfl_bno(agfl_buf);
	if (xfs_has_crc(mp)) {
		/* v5 format: fill in the AGFL header, then explicitly
		 * reset every slot to NULLAGBLOCK */
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
		for (agfl_idx = 0; agfl_idx < libxfs_agfl_size(mp); agfl_idx++)
			freelist[agfl_idx] = cpu_to_be32(NULLAGBLOCK);
	}

	/* Fill the AGFL with leftover blocks or save them for later. */
	agfl_idx = 0;
	freelist = xfs_buf_to_agfl_bno(agfl_buf);
	fill_agfl(btr_bno, freelist, &agfl_idx);
	fill_agfl(btr_cnt, freelist, &agfl_idx);
	if (xfs_has_rmapbt(mp))
		fill_agfl(btr_rmap, freelist, &agfl_idx);

	/* Set the AGF counters for the AGFL. */
	if (agfl_idx > 0) {
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(agfl_idx - 1);
		agf->agf_flcount = cpu_to_be32(agfl_idx);
		rmap_store_agflcount(mp, agno, agfl_idx);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "writing agfl for ag %u\n", agno);
#endif

	} else  {
		/* empty freelist: first/last wrap to the canonical
		 * empty positions */
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(libxfs_agfl_size(mp) - 1);
		agf->agf_flcount = 0;
	}

	libxfs_buf_mark_dirty(agfl_buf);
	libxfs_buf_relse(agfl_buf);

	/* longest free extent, taken from the incore by-size tree */
	ext_ptr = findbiggest_bcnt_extent(agno);
	agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
						ext_ptr->ex_blockcount : 0);

	/* sanity: the rebuilt trees must all have distinct root blocks */
	ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));
	ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]));
	ASSERT(be32_to_cpu(agf->agf_refcount_root) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));

	libxfs_buf_mark_dirty(agf_buf);
	libxfs_buf_relse(agf_buf);

	/*
	 * now fix up the free list appropriately
	 */
	fix_freelist(mp, agno, true);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "wrote agf for ag %u\n", agno);
#endif
}
389
390 /*
391 * update the superblock counters, sync the sb version numbers and
392 * feature bits to the filesystem, and sync up the on-disk superblock
393 * to match the incore superblock.
394 */
395 static void
396 sync_sb(xfs_mount_t *mp)
397 {
398 struct xfs_buf *bp;
399
400 bp = libxfs_getsb(mp);
401 if (!bp)
402 do_error(_("couldn't get superblock\n"));
403
404 mp->m_sb.sb_icount = sb_icount;
405 mp->m_sb.sb_ifree = sb_ifree;
406 mp->m_sb.sb_fdblocks = sb_fdblocks;
407 mp->m_sb.sb_frextents = sb_frextents;
408
409 update_sb_version(mp);
410
411 libxfs_sb_to_disk(bp->b_addr, &mp->m_sb);
412 libxfs_buf_mark_dirty(bp);
413 libxfs_buf_relse(bp);
414 }
415
416 /*
417 * make sure the root and realtime inodes show up allocated
418 * even if they've been freed. they get reinitialized in phase6.
419 */
/*
 * make sure the root and realtime inodes show up allocated
 * even if they've been freed. they get reinitialized in phase6.
 *
 * NOTE(review): this marks 3 consecutive inodes in the chunk record
 * holding the root inode — presumably root, rt bitmap and rt summary,
 * which mkfs allocates back to back; confirm against mkfs layout.
 */
static void
keep_fsinos(xfs_mount_t *mp)
{
	ino_tree_node_t		*irec;
	int			i;

	/* chunk record containing the root inode */
	irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
			XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));

	/* 3 = root inode plus the two realtime inodes following it */
	for (i = 0; i < 3; i++)
		set_inode_used(irec, i);
}
432
/*
 * Rebuild all per-AG metadata for one AG: the incore freespace trees,
 * then the on-disk inode, finobt, rmap, refcount and freespace btrees,
 * and finally the AGF/AGFL/AGI headers that reference them.  Also
 * accumulates this AG's contribution to the global icount/ifree/
 * fdblocks counters in the sb_*_ag arrays.
 */
static void
phase5_func(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct bitmap		*lost_blocks)
{
	struct repair_ctx	sc = { .mp = mp, };
	struct bt_rebuild	btr_bno;
	struct bt_rebuild	btr_cnt;
	struct bt_rebuild	btr_ino;
	struct bt_rebuild	btr_fino;
	struct bt_rebuild	btr_rmap;
	struct bt_rebuild	btr_refc;
	xfs_agnumber_t		agno = pag->pag_agno;
	int			extra_blocks = 0;
	uint			num_freeblocks;
	xfs_agblock_t		num_extents;
	unsigned int		est_agfreeblocks = 0;
	unsigned int		total_btblocks;

	if (verbose)
		do_log(_("        - agno = %d\n"), agno);

	/*
	 * build up incore bno and bcnt extent btrees
	 */
	num_extents = mk_incore_fstree(mp, agno, &num_freeblocks);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "# of bno extents is %d\n", count_bno_extents(agno));
#endif

	if (num_extents == 0) {
		/*
		 * XXX - what we probably should do here is pick an inode for
		 * a regular file in the allocation group that has space
		 * allocated and shoot it by traversing the bmap list and
		 * putting all its extents on the incore freespace trees,
		 * clearing the inode, and clearing the in-use bit in the
		 * incore inode tree.  Then try mk_incore_fstree() again.
		 */
		do_error(
_("unable to rebuild AG %u.  Not enough free space in on-disk AG.\n"),
			agno);
	}

	/*
	 * Estimate the number of free blocks in this AG after rebuilding
	 * all btrees, so the btree builders can reserve sensibly.
	 */
	total_btblocks = estimate_agbtree_blocks(pag, num_extents);
	if (num_freeblocks > total_btblocks)
		est_agfreeblocks = num_freeblocks - total_btblocks;

	/* set up the inode/finobt rebuild cursors; these also produce
	 * the per-AG inode counts used for the superblock totals */
	init_ino_cursors(&sc, pag, est_agfreeblocks, &sb_icount_ag[agno],
			&sb_ifree_ag[agno], &btr_ino, &btr_fino);

	init_rmapbt_cursor(&sc, pag, est_agfreeblocks, &btr_rmap);

	init_refc_cursor(&sc, pag, est_agfreeblocks, &btr_refc);

	num_extents = count_bno_extents_blocks(agno, &num_freeblocks);
	/*
	 * lose two blocks per AG -- the space tree roots are counted as
	 * allocated since the space trees always have roots
	 */
	sb_fdblocks_ag[agno] += num_freeblocks - 2;

	if (num_extents == 0) {
		/*
		 * XXX - what we probably should do here is pick an inode for
		 * a regular file in the allocation group that has space
		 * allocated and shoot it by traversing the bmap list and
		 * putting all its extents on the incore freespace trees,
		 * clearing the inode, and clearing the in-use bit in the
		 * incore inode tree.  Then try mk_incore_fstree() again.
		 */
		do_error(_("unable to rebuild AG %u.  No free space.\n"), agno);
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "# of bno extents is %d\n", num_extents);
#endif

	/*
	 * track blocks that we might really lose
	 */
	init_freespace_cursors(&sc, pag, est_agfreeblocks, &num_extents,
			&extra_blocks, &btr_bno, &btr_cnt);

	/*
	 * freespace btrees live in the "free space" but the filesystem treats
	 * AGFL blocks as allocated since they aren't described by the
	 * freespace trees
	 */

	/*
	 * see if we can fit all the extra blocks into the AGFL
	 */
	extra_blocks = (extra_blocks - libxfs_agfl_size(mp) > 0) ?
			extra_blocks - libxfs_agfl_size(mp) : 0;

	if (extra_blocks > 0)
		sb_fdblocks_ag[agno] -= extra_blocks;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "# of bno extents is %d\n", count_bno_extents(agno));
	fprintf(stderr, "# of bcnt extents is %d\n", count_bcnt_extents(agno));
#endif

	build_freespace_btrees(&sc, agno, &btr_bno, &btr_cnt);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "# of free blocks == %d/%d\n", btr_bno.freeblks,
			btr_cnt.freeblks);
#endif
	/* both freespace trees describe the same space */
	ASSERT(btr_bno.freeblks == btr_cnt.freeblks);

	if (xfs_has_rmapbt(mp)) {
		build_rmap_tree(&sc, agno, &btr_rmap);
		/* rmapbt blocks count as free space except the root */
		sb_fdblocks_ag[agno] += btr_rmap.newbt.afake.af_blocks - 1;
	}

	if (xfs_has_reflink(mp))
		build_refcount_tree(&sc, agno, &btr_refc);

	/*
	 * set up agf and agfl
	 */
	build_agf_agfl(mp, agno, &btr_bno, &btr_cnt, &btr_rmap, &btr_refc,
			lost_blocks);

	build_inode_btrees(&sc, agno, &btr_ino, &btr_fino);

	/* build the agi */
	build_agi(mp, agno, &btr_ino, &btr_fino);

	/*
	 * tear down cursors
	 */
	finish_rebuild(mp, &btr_bno, lost_blocks);
	finish_rebuild(mp, &btr_cnt, lost_blocks);
	finish_rebuild(mp, &btr_ino, lost_blocks);
	if (xfs_has_finobt(mp))
		finish_rebuild(mp, &btr_fino, lost_blocks);
	if (xfs_has_rmapbt(mp))
		finish_rebuild(mp, &btr_rmap, lost_blocks);
	if (xfs_has_reflink(mp))
		finish_rebuild(mp, &btr_refc, lost_blocks);

	/*
	 * release the incore per-AG bno/bcnt trees so the extent nodes
	 * can be recycled
	 */
	release_agbno_extent_tree(agno);
	release_agbcnt_extent_tree(agno);
	PROG_RPT_INC(prog_rpt_done[agno], 1);
}
591
592 /* Inject this unused space back into the filesystem. */
593 static int
594 inject_lost_extent(
595 uint64_t start,
596 uint64_t length,
597 void *arg)
598 {
599 struct xfs_mount *mp = arg;
600 struct xfs_trans *tp;
601 struct xfs_perag *pag;
602 xfs_agnumber_t agno;
603 xfs_agblock_t agbno;
604 int error;
605
606 error = -libxfs_trans_alloc_rollable(mp, 16, &tp);
607 if (error)
608 return error;
609
610 agno = XFS_FSB_TO_AGNO(mp, start);
611 agbno = XFS_FSB_TO_AGBNO(mp, start);
612 pag = libxfs_perag_get(mp, agno);
613 error = -libxfs_free_extent(tp, pag, agbno, length,
614 &XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE);
615 libxfs_perag_put(pag);
616
617 if (error)
618 return error;
619
620 return -libxfs_trans_commit(tp);
621 }
622
/*
 * Regenerate and verify the realtime metadata.  Recomputes the rt
 * bitmap and summary from incore state (generate_rtinfo), then checks
 * the on-disk copies against them.
 *
 * NOTE(review): rtinit() presumably (re)initializes the incore rt
 * state before regeneration — confirm against rt.c.
 */
void
check_rtmetadata(
	struct xfs_mount	*mp)
{
	rtinit(mp);
	generate_rtinfo(mp, btmcompute, sumcompute);
	check_rtbitmap(mp);
	check_rtsummary(mp);
}
632
/*
 * Phase 5 driver: rebuild every AG's headers and btrees, aggregate the
 * per-AG counters into the global superblock counters, regenerate
 * realtime metadata if present, write the superblock, store per-AG
 * btree rmap records, and return unused reserved blocks to the free
 * space pool.
 */
void
phase5(xfs_mount_t *mp)
{
	struct bitmap		*lost_blocks = NULL;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error;

	do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
	set_progress_msg(PROG_FMT_REBUILD_AG, (uint64_t)glob_agcount);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
		libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
		libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
	fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
		libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
		libxfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
	fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
		XR_INOBT_BLOCK_MAXRECS(mp, 0));
	fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
		XR_INOBT_BLOCK_MAXRECS(mp, 1));
	fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
		libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
		libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
	fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
		libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
		libxfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
#endif
	/*
	 * make sure the root and realtime inodes show up allocated
	 */
	keep_fsinos(mp);

	/* allocate per ag counters */
	sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
	if (sb_icount_ag == NULL)
		do_error(_("cannot alloc sb_icount_ag buffers\n"));

	sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
	if (sb_ifree_ag == NULL)
		do_error(_("cannot alloc sb_ifree_ag buffers\n"));

	sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
	if (sb_fdblocks_ag == NULL)
		do_error(_("cannot alloc sb_fdblocks_ag buffers\n"));

	/* tracks blocks reserved for rebuilds but ultimately unused;
	 * they are freed back to the fs at the end of this function */
	error = bitmap_alloc(&lost_blocks);
	if (error)
		do_error(_("cannot alloc lost block bitmap\n"));

	for_each_perag(mp, agno, pag)
		phase5_func(mp, pag, lost_blocks);

	print_final_rpt();

	/* aggregate per ag counters */
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++)  {
		sb_icount += sb_icount_ag[agno];
		sb_ifree += sb_ifree_ag[agno];
		sb_fdblocks += sb_fdblocks_ag[agno];
	}
	free(sb_icount_ag);
	free(sb_ifree_ag);
	free(sb_fdblocks_ag);

	if (mp->m_sb.sb_rblocks)  {
		do_log(
		_("        - generate realtime summary info and bitmap...\n"));
		check_rtmetadata(mp);
	}

	do_log(_("        - reset superblock...\n"));

	/*
	 * sync superblock counter and set version bits correctly
	 */
	sync_sb(mp);

	/*
	 * Put the per-AG btree rmap data into the rmapbt now that we've reset
	 * the superblock counters.
	 */
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = rmap_store_ag_btree_rec(mp, agno);
		if (error)
			do_error(
_("unable to add AG %u reverse-mapping data to btree.\n"), agno);
	}

	/*
	 * Put blocks that were unnecessarily reserved for btree
	 * reconstruction back into the filesystem free space data.
	 */
	error = bitmap_iterate(lost_blocks, inject_lost_extent, mp);
	if (error)
		do_error(_("Unable to reinsert lost blocks into filesystem.\n"));
	bitmap_free(&lost_blocks);

	/* inode btrees are now trustworthy again */
	bad_ino_btree = 0;

}