// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped. Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */
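
/*
 * For example (hypothetical owners/offsets): if AG block 100 is shared by
 * two reflinked files, the tree holds two records keyed roughly as
 * [100, <inode A>, 0] and [100, <inode B>, 8]; the owner and offset parts
 * of the key keep both records distinct even though they describe the same
 * physical block.
 */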
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);

	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);

	*stat = 1;
	return 0;
}
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);

	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
	return 0;
}
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}
/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record. In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
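/*
 * Worked example (illustrative values): a data fork record with
 * rm_startblock 100, rm_blockcount 8 and rm_offset 16 yields the high key
 * [startblock 107, same owner, offset 23]; a metadata (non-inode owner)
 * record covering the same blocks keeps its offset and only the startblock
 * is bumped to 107.
 */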
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	uint64_t			off;
	int				adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_rmap_irec		*rec = &cur->bc_rec.r;
	const struct xfs_rmap_key	*kp = &key->rmap;
	__u64				x, y;
	int64_t				d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	const struct xfs_rmap_key	*kp1 = &k1->rmap;
	const struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t				d;
	__u64				x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		     be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_rmapbt(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}
static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}
const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
};
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	/* Overlapping btree; 2 keys per pointer. */
	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
			mp->m_rmap_maxlevels);
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ops = &xfs_rmapbt_ops;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	return cur;
}
/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_ag.agbp = agbp;
	return cur;
}
/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
/*
 * Install a new reverse mapping btree root. Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}
/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}
/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	/*
	 * On a non-reflink filesystem, the maximum number of rmap
	 * records is the number of blocks in the AG, hence the max
	 * rmapbt height is log_$maxrecs($agblocks). However, with
	 * reflink each AG block can have up to 2^32 (per the refcount
	 * record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.
	 *
	 * That effectively means that the max rmapbt height must be
	 * XFS_BTREE_MAXLEVELS. "Fortunately" we'll run out of AG
	 * blocks to feed the rmapbt long before the rmapbt reaches
	 * maximum height. The reflink code uses ag_resv_critical to
	 * disallow reflinking when less than 10% of the per-AG metadata
	 * block reservation remains, since the fallback is a regular
	 * file copy.
	 */
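	/*
	 * Roughly speaking, xfs_btree_compute_maxlevels(mnr, len) returns the
	 * smallest height at which a tree whose blocks hold only the minimum
	 * record/key counts (m_rmap_mnr[0] per leaf, m_rmap_mnr[1] per node)
	 * can still index len records, i.e. about
	 * 1 + ceil(log_mnr[1](ceil(len / mnr[0]))) levels.
	 */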
	if (xfs_has_reflink(mp))
		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
	else
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}
/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_rmapbt(mp))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion. We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
		agblocks -= mp->m_sb.sb_logblocks;
	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
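	/*
	 * For example (illustrative numbers): an AG of 1,000,000 usable blocks
	 * asks for at least 10,000 blocks (1%), or the worst-case size of an
	 * rmapbt indexing 1,000,000 records if that is larger.
	 */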