/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped.  Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */

static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
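
/*
 * Install a new root block: update the root pointer and level count in the
 * AGF and the in-core perag, then log the AGF changes.
 */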
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
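
/*
 * Allocate a new rmapbt block from the AG free list and return it to the
 * generic btree code via *new.
 */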
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
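
/*
 * Return a freed rmapbt block to the AG free list, marking the extent busy
 * so it cannot be reused before the free commits to disk.
 */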
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	return 0;
}
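
/* Return the minimum number of records for an rmapbt block at this level. */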
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}
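
/* Return the maximum number of records for an rmapbt block at this level. */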
STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}
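
/* Construct the low search key from an rmap record. */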
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}
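
/* Convert the in-core rmap record in the cursor to on-disk (big-endian) form. */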
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}
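
/* Point the btree cursor at the rmapbt root block recorded in the AGF. */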
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
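
/*
 * Compare the cursor's in-core record against an on-disk key, ordering by
 * [startblock, owner, offset] as described above.
 */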
STATIC __int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	__int64_t		d;

	d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
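
/* Compare two on-disk keys, again ordering by [startblock, owner, offset]. */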
STATIC __int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	__int64_t		d;
	__u64			x, y;

	d = (__int64_t)be32_to_cpu(kp1->rm_startblock) -
		       be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
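
/* Verify the sanity of an rmapbt block that is being read in or written out. */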
static bool
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner
	 * as the perag is not fully initialised and hence not attached to the
	 * buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum
	 * limits in this case.
	 */
	if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return false;
	} else if (level >= mp->m_rmap_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
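
/* Read verifier: check the CRC first, then the block contents. */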
static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_rmapbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}
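
/* Write verifier: check the block contents, then recompute the CRC. */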
static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_rmapbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
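
/* Debug-only check that adjacent keys in a block are in the correct order. */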
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	__uint32_t		x;
	__uint32_t		y;
	__uint64_t		a;
	__uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	__uint32_t		x;
	__uint32_t		y;
	__uint64_t		a;
	__uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
#endif	/* DEBUG */
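
/* Operations vector connecting the rmapbt to the generic btree code. */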
static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
#endif
};

/*
 * Allocate a new reverse mapping btree cursor.
 */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_rmapbt_ops;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}