// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "libxfs_priv.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap_btree.h"
#include "xfs_errortag.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota_defs.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_health.h"
#include "defer_item.h"
#include "xfs_symlink_remote.h"
#include "xfs_inode_util.h"
#include "xfs_rtgroup.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t		*mp,		/* file system mount structure */
	int			whichfork)	/* data or attr fork */
{
	uint64_t		maxblocks;	/* max blocks at this level */
	xfs_extnum_t		maxleafents;	/* max leaf entries possible */
	int			level;		/* btree level */
	int			maxrootrecs;	/* max records in root block */
	int			minleafrecs;	/* min records in leaf block */
	int			minnoderecs;	/* min records in node block */
	int			sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = xfs_bmdr_space_calc(MINDBTPTRS);
	else
		sz = xfs_bmdr_space_calc(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
	return xfs_bmdr_space_calc(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
xfs_filblks_t
xfs_bmap_worst_indlen(
	struct xfs_inode	*ip,		/* incore inode pointer */
	xfs_filblks_t		len)		/* delayed extent length */
{
	struct xfs_mount	*mp = ip->i_mount;
	int			maxrecs = mp->m_bmap_dmxr[0];
	int			level;
	xfs_filblks_t		rval;

	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
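
/*
 * Worked example (hypothetical geometry): for len = 1000 blocks with
 * m_bmap_dmxr[0] = 125 and m_bmap_dmxr[1] = 250, level 0 gives
 * howmany(1000, 125) = 8 leaf blocks (rval = 8); level 1 gives
 * howmany(8, 250) = 1 node (rval = 9).  Since len hit 1, we return
 * rval + XFS_BM_MAXLEVELS - level - 1, charging one block for each
 * btree level that could still sit above us; with XFS_BM_MAXLEVELS = 5
 * the worst-case indirect reservation would be 9 + 5 - 1 - 1 = 12.
 */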

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	struct xfs_inode	*ip,
	int			whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}
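
/*
 * Note that i_forkoff is kept in 8-byte units, matching the on-disk
 * di_forkoff encoding, while xfs_default_attroffset() returns bytes;
 * hence the >> 3 above.  For example, a default attr offset of 2048
 * bytes is stored as i_forkoff = 256.
 */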

static int
xfs_bmap_read_buf(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp)		/* buffer for fsbno */
{
	struct xfs_buf		*bp;		/* return value */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
			&xfs_bmbt_buf_ops);
	if (!error) {
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		*bpp = bp;
	}
	return error;
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_daddr_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,	/* btree block to examine */
	xfs_mount_t		*mp,	/* file system mount point */
	int			root,	/* flag for root block */
	short			sz)	/* block size, if root block */
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = xfs_bmbt_key_addr(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
		else
			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
			else
				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j; /* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release)
			xfs_trans_brelse(NULL, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = xfs_bmbt_rec_addr(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release)
			xfs_trans_brelse(NULL, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, convert */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);

	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
#endif
	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE, 0);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	block = xfs_bmap_broot_realloc(ip, whichfork, 1);

	/*
	 * Fill in the root.
	 */
	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	if (wasdel)
		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_bmap.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = xfs_bmbt_key_addr(mp, block, 1);
	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
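
/*
 * After a successful conversion the tree always has exactly two levels:
 * a root in the inode fork holding a single key/pointer, and one leaf
 * block holding every real (non-delalloc) extent record.  For example,
 * a fork with if_nextents = 100 ends up with bb_numrecs = 100 in the
 * new child block and bb_numrecs = 1 in the incore root.
 */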

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_data = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp, void *priv),
	void		*priv)
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp, priv);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_data = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
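
/*
 * Illustrative sketch of an init_fn callout (hypothetical, not part of
 * this file): a format that is byte-for-byte identical on disk could
 * simply copy the inline fork data into the new block and log it:
 *
 *	static void
 *	example_local_to_remote(
 *		struct xfs_trans	*tp,
 *		struct xfs_buf		*bp,
 *		struct xfs_inode	*ip,
 *		struct xfs_ifork	*ifp,
 *		void			*priv)
 *	{
 *		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
 *		memcpy(bp->b_addr, ifp->if_data, ifp->if_bytes);
 *		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *	}
 *
 * Real callouts such as xfs_symlink_local_to_remote() may also need to
 * write a block header on v5 filesystems, which is why logging is left
 * to the callout rather than done by the caller.
 */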

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		dargs.owner = ip->i_ino;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
				XFS_DATA_FORK, xfs_symlink_local_to_remote,
				NULL);

	/* should only be called for types that support local format data */
	ASSERT(0);
	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.  Caller must hold the
 * ILOCK_EXCL and the file cannot have an attr fork.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (!xfs_is_metadir_inode(ip))
		ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	ASSERT(!xfs_inode_has_attr_fork(ip));

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		return error;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		return error;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	return 0;
}

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_failaddr_t			fa,
	const struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount		*mp = ip->i_mount;
	const char			*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = xfs_bmbt_rec_addr(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			xfs_bmap_mark_sick(ip, whichfork);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	if (xfs_metadata_is_sick(error))
		xfs_bmap_mark_sick(ip, whichfork);
	xfs_iext_destroy(ifp);
	return error;
}
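
/*
 * The store-release above pairs with the load-acquire in
 * xfs_need_iread_extents(), which presumably reads back something like
 * smp_load_acquire(&ifp->if_needextents) != 0.  A reader that observes
 * if_needextents == 0 is therefore guaranteed to also observe the fully
 * populated incore extent tree written before the release.
 */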

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
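
/*
 * Worked example: with mappings at [0, 10) and [15, 20), len = 4 and
 * the caller passing *first_unused = 0, the first pass records
 * lastaddr = 10, and the second sees got.br_startoff (15) - max (10)
 * >= 4, so the loop breaks and *first_unused = 10 -- the start of the
 * five-block hole.
 */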

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
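
/*
 * For example, with a last extent covering [100, 110): an allocation at
 * offset 110 (or beyond) sets aeof, as does one at offset 105 if that
 * extent is still delalloc; an allocation at offset 50 does not.
 */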

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

static inline bool
xfs_bmap_same_rtgroup(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,
	struct xfs_bmbt_irec	*right)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) {
		if (xfs_rtb_to_rgno(mp, left->br_startblock) !=
		    xfs_rtb_to_rgno(mp, right->br_startblock))
			return false;
	}

	return true;
}

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new))
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN) &&
	    xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		ASSERT(da_new <= da_old);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		ASSERT(da_new <= da_old);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_bmap.allocated;
		bma->cur->bc_bmap.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new < da_old)
		xfs_add_fdblocks(mp, da_old - da_new);
	else if (da_new > da_old)
		error = xfs_dec_fdblocks(mp, da_new - da_old, true);

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
}
2007 * Convert an unwritten allocation to a real allocation or vice versa.
2010 xfs_bmap_add_extent_unwritten_real(
2011 struct xfs_trans
*tp
,
2012 xfs_inode_t
*ip
, /* incore inode pointer */
2014 struct xfs_iext_cursor
*icur
,
2015 struct xfs_btree_cur
**curp
, /* if *curp is null, not a btree */
2016 xfs_bmbt_irec_t
*new, /* new data to add to file extents */
2017 int *logflagsp
) /* inode logging flags */
2019 struct xfs_btree_cur
*cur
; /* btree cursor */
2020 int error
; /* error return value */
2021 int i
; /* temp state */
2022 struct xfs_ifork
*ifp
; /* inode fork pointer */
2023 xfs_fileoff_t new_endoff
; /* end offset of new entry */
2024 xfs_bmbt_irec_t r
[3]; /* neighbor extent entries */
2025 /* left is 0, right is 1, prev is 2 */
2026 int rval
=0; /* return value (logging flags) */
2027 uint32_t state
= xfs_bmap_fork_to_state(whichfork
);
2028 struct xfs_mount
*mp
= ip
->i_mount
;
2029 struct xfs_bmbt_irec old
;
2034 ifp
= xfs_ifork_ptr(ip
, whichfork
);
2036 ASSERT(!isnullstartblock(new->br_startblock
));
2038 XFS_STATS_INC(mp
, xs_add_exlist
);
2045 * Set up a bunch of variables to make the tests simpler.
2048 xfs_iext_get_extent(ifp
, icur
, &PREV
);
2049 ASSERT(new->br_state
!= PREV
.br_state
);
2050 new_endoff
= new->br_startoff
+ new->br_blockcount
;
2051 ASSERT(PREV
.br_startoff
<= new->br_startoff
);
2052 ASSERT(PREV
.br_startoff
+ PREV
.br_blockcount
>= new_endoff
);
2055 * Set flags determining what part of the previous oldext allocation
2056 * extent is being replaced by a newext allocation.
2058 if (PREV
.br_startoff
== new->br_startoff
)
2059 state
|= BMAP_LEFT_FILLING
;
2060 if (PREV
.br_startoff
+ PREV
.br_blockcount
== new_endoff
)
2061 state
|= BMAP_RIGHT_FILLING
;
2064 * Check and set flags if this segment has a left neighbor.
2065 * Don't set contiguous if the combined extent would be too large.
2067 if (xfs_iext_peek_prev_extent(ifp
, icur
, &LEFT
)) {
2068 state
|= BMAP_LEFT_VALID
;
2069 if (isnullstartblock(LEFT
.br_startblock
))
2070 state
|= BMAP_LEFT_DELAY
;
2073 if ((state
& BMAP_LEFT_VALID
) && !(state
& BMAP_LEFT_DELAY
) &&
2074 LEFT
.br_startoff
+ LEFT
.br_blockcount
== new->br_startoff
&&
2075 LEFT
.br_startblock
+ LEFT
.br_blockcount
== new->br_startblock
&&
2076 LEFT
.br_state
== new->br_state
&&
2077 LEFT
.br_blockcount
+ new->br_blockcount
<= XFS_MAX_BMBT_EXTLEN
&&
2078 xfs_bmap_same_rtgroup(ip
, whichfork
, &LEFT
, new))
2079 state
|= BMAP_LEFT_CONTIG
;
2082 * Check and set flags if this segment has a right neighbor.
2083 * Don't set contiguous if the combined extent would be too large.
2084 * Also check for all-three-contiguous being too large.
2086 if (xfs_iext_peek_next_extent(ifp
, icur
, &RIGHT
)) {
2087 state
|= BMAP_RIGHT_VALID
;
2088 if (isnullstartblock(RIGHT
.br_startblock
))
2089 state
|= BMAP_RIGHT_DELAY
;
2092 if ((state
& BMAP_RIGHT_VALID
) && !(state
& BMAP_RIGHT_DELAY
) &&
2093 new_endoff
== RIGHT
.br_startoff
&&
2094 new->br_startblock
+ new->br_blockcount
== RIGHT
.br_startblock
&&
2095 new->br_state
== RIGHT
.br_state
&&
2096 new->br_blockcount
+ RIGHT
.br_blockcount
<= XFS_MAX_BMBT_EXTLEN
&&
2097 ((state
& (BMAP_LEFT_CONTIG
| BMAP_LEFT_FILLING
|
2098 BMAP_RIGHT_FILLING
)) !=
2099 (BMAP_LEFT_CONTIG
| BMAP_LEFT_FILLING
|
2100 BMAP_RIGHT_FILLING
) ||
2101 LEFT
.br_blockcount
+ new->br_blockcount
+ RIGHT
.br_blockcount
2102 <= XFS_MAX_BMBT_EXTLEN
) &&
2103 xfs_bmap_same_rtgroup(ip
, whichfork
, new, &RIGHT
))
2104 state
|= BMAP_RIGHT_CONTIG
;
2107 * Switch out based on the FILLING and CONTIG state bits.
2109 switch (state
& (BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
|
2110 BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
)) {
2111 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
|
2112 BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
2114 * Setting all of a previous oldext extent to newext.
2115 * The left and right neighbors are both contiguous with new.
2117 LEFT
.br_blockcount
+= PREV
.br_blockcount
+ RIGHT
.br_blockcount
;
2119 xfs_iext_remove(ip
, icur
, state
);
2120 xfs_iext_remove(ip
, icur
, state
);
2121 xfs_iext_prev(ifp
, icur
);
2122 xfs_iext_update_extent(ip
, state
, icur
, &LEFT
);
2123 ifp
->if_nextents
-= 2;
2125 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2127 rval
= XFS_ILOG_CORE
;
2128 error
= xfs_bmbt_lookup_eq(cur
, &RIGHT
, &i
);
2131 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2132 xfs_btree_mark_sick(cur
);
2133 error
= -EFSCORRUPTED
;
2136 if ((error
= xfs_btree_delete(cur
, &i
)))
2138 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2139 xfs_btree_mark_sick(cur
);
2140 error
= -EFSCORRUPTED
;
2143 if ((error
= xfs_btree_decrement(cur
, 0, &i
)))
2145 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2146 xfs_btree_mark_sick(cur
);
2147 error
= -EFSCORRUPTED
;
2150 if ((error
= xfs_btree_delete(cur
, &i
)))
2152 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2153 xfs_btree_mark_sick(cur
);
2154 error
= -EFSCORRUPTED
;
2157 if ((error
= xfs_btree_decrement(cur
, 0, &i
)))
2159 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2160 xfs_btree_mark_sick(cur
);
2161 error
= -EFSCORRUPTED
;
2164 error
= xfs_bmbt_update(cur
, &LEFT
);
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		old = PREV;
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			old.br_startoff + old.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = PREV.br_state;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &r[1], state);
		xfs_iext_insert(ip, icur, &r[0], state);
		ifp->if_nextents += 2;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new right extent - oldext */
			error = xfs_bmbt_update(cur, &r[1]);
			if (error)
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new middle extent - newext */
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* update reverse mappings */
	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_bmap.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
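/*
 * Example of the state machine above: converting the middle of an
 * unwritten extent (no FILLING or CONTIG bits set, the "case 0" branch)
 * splits one record into three: PREV keeps the blocks before the
 * conversion range, r[0] carries the converted middle in the new state,
 * and r[1] carries the unconverted tail.  That is why if_nextents grows
 * by two there and shrinks by two in the fully-contiguous case.
 */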
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	uint32_t		flags)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error = 0;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
			right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) &&
	    xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		left.br_blockcount += new->br_blockcount + right.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		ifp->if_nextents--;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &right, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		old = left;
		left.br_blockcount += new->br_blockcount;

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		old = right;

		right.br_startoff = new->br_startoff;
		right.br_startblock = new->br_startblock;
		right.br_blockcount += new->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &right);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &right);
			if (error)
				goto done;
		}
		break;

	default:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	}

	/* add reverse mapping unless caller opted out */
	if (!(flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_bmap.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
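/*
 * Contiguity example for the merge logic above: with
 * left = {br_startoff 90, br_startblock 490, br_blockcount 10} and a new
 * allocation {br_startoff 100, br_startblock 500, br_blockcount 10} in the
 * same state and allocation group, both the file offsets and the disk
 * blocks line up, so BMAP_LEFT_CONTIG is set and the two records collapse
 * into {br_startoff 90, br_startblock 490, br_blockcount 20}.
 */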
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on i_extsize and rt extsize.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	div_u64_rem(orig_off, extsz, &temp);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so
	 * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer
	 * allocation loops handle short allocation just fine, so it is safe to
	 * do this. We only want to do it when we are forced to, though, because
	 * it means more allocation operations are required.
	 */
	while (align_alen > XFS_MAX_BMBT_EXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see XFS_BMBT_MAX_EXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
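/*
 * Worked example for xfs_bmap_extsize_align(): with extsz = 16,
 * *offp = 21 and *lenp = 10, div_u64_rem(21, 16) leaves remainder 5, so
 * the start is rounded down to offset 16 and the length grows to 15;
 * rounding the end up then adds 16 - (15 % 16) = 1 block, giving an
 * aligned request of offset 16, length 16.  The neighbour checks that
 * follow may shrink or shift this again so the result never overlaps
 * prevp or gotp.
 */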
static bool
xfs_bmap_adjacent_valid(
	struct xfs_bmalloca	*ap,
	xfs_fsblock_t		x,
	xfs_fsblock_t		y)
{
	struct xfs_mount	*mp = ap->ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ap->ip) &&
	    (ap->datatype & XFS_ALLOC_USERDATA)) {
		if (!xfs_has_rtgroups(mp))
			return x < mp->m_sb.sb_rblocks;

		return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
			xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
			xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
	}

	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
}
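/*
 * In the helper above, "x" is a candidate block derived from a
 * neighbouring extent and "y" is a block the neighbour actually maps; the
 * candidate only counts as adjacent if it falls in the same AG (or
 * realtime group) as the neighbour and within the valid block range of
 * that group.
 */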
#define XFS_ALLOC_GAP_UNITS	4

/* returns true if ap->blkno was modified */
static bool
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t		adjust;		/* adjustment to block numbers */

	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    xfs_bmap_adjacent_valid(ap,
			ap->prev.br_startblock + ap->prev.br_blockcount,
			ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
				ap->prev.br_startblock))
			ap->blkno += adjust;
		return true;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    xfs_bmap_adjacent_valid(ap, prevbno,
				ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 *
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
					ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
					gotbno))
				gotbno -= adjust;
			else if (xfs_bmap_adjacent_valid(ap,
					gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
			return true;
		}
		if (prevbno != NULLFSBLOCK) {
			ap->blkno = prevbno;
			return true;
		}
		if (gotbno != NULLFSBLOCK) {
			ap->blkno = gotbno;
			return true;
		}
	}

	return false;
}
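/*
 * Locality heuristic example: with XFS_ALLOC_GAP_UNITS == 4, a request for
 * 100 blocks whose file offset sits 300 blocks past the end of the
 * previous extent still aims for the physically linear position (previous
 * end plus the 300-block gap), since 300 <= 4 * 100.  A 500-block gap
 * instead keeps the candidate at the previous extent's end and adds the
 * gap to prevdiff, making the right-hand candidate more likely to win the
 * final comparison.
 */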
static int
xfs_bmap_longest_free_extent(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		*blen)
{
	xfs_extlen_t		longest;
	int			error = 0;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
				NULL);
		if (error)
			return error;
	}

	longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(pag_mount(pag), pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

	return 0;
}
static xfs_extlen_t
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen)
{
	/*
	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is
	 * possible that there is enough contiguous free space for this request.
	 */
	if (blen < ap->minlen)
		return ap->minlen;

	/*
	 * If the best seen length is less than the request length,
	 * use the best as the minimum, otherwise we've got the maxlen we
	 * were asked for.
	 */
	if (blen < args->maxlen)
		return blen;
	return args->maxlen;
}
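/*
 * Example: if the caller asked for maxlen = 64 blocks but the longest free
 * extent seen in any AG is blen = 48 and ap->minlen = 1, the allocation
 * proceeds with minlen = 48, so a single 48-block extent is still
 * preferred over an early -ENOSPC.
 */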
static int
xfs_bmap_btalloc_select_lengths(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno, startag;
	int			error = 0;

	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->total = ap->minlen;
		args->minlen = ap->minlen;
		return 0;
	}

	args->total = ap->total;
	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
	if (startag == NULLAGNUMBER)
		startag = 0;

	*blen = 0;
	for_each_perag_wrap(mp, startag, agno, pag) {
		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
		if (error && error != -EAGAIN)
			break;
		error = 0;
		if (*blen >= args->maxlen)
			break;
	}
	if (pag)
		xfs_perag_rele(pag);

	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
	return error;
}
/* Update all inode and quota accounting for the allocation we just did. */
void
xfs_bmap_alloc_account(
	struct xfs_bmalloca	*ap)
{
	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
					!(ap->flags & XFS_BMAPI_ATTRFORK);
	uint			fld;

	if (ap->flags & XFS_BMAPI_COWFORK) {
		/*
		 * COW fork blocks are in-core only and thus are treated as
		 * in-core quota reservation (like delalloc blocks) even when
		 * converted to real blocks. The quota reservation is not
		 * accounted to disk until blocks are remapped to the data
		 * fork. So if these blocks were previously delalloc, we
		 * already have quota reservation and there's nothing to do
		 * yet.
		 */
		if (ap->wasdel) {
			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
			return;
		}

		/*
		 * Otherwise, we've allocated blocks in a hole. The transaction
		 * has acquired in-core quota reservation for this extent.
		 * Rather than account these as real blocks, however, we reduce
		 * the transaction quota reservation based on the allocation.
		 * This essentially transfers the transaction quota reservation
		 * to that of a delalloc extent.
		 */
		ap->ip->i_delayed_blks += ap->length;
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
			XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
			-(long)ap->length);
		return;
	}

	/* data/attr fork only */
	ap->ip->i_nblocks += ap->length;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
	if (ap->wasdel) {
		ap->ip->i_delayed_blks -= ap->length;
		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
	} else {
		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
	}

	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
}
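/*
 * Note the split above: COW fork allocations stay accounted as in-core
 * (delalloc-style) quota reservations until the blocks are remapped into
 * the data fork, while data/attr fork allocations update i_nblocks
 * immediately and charge either the regular or the delayed block count
 * field depending on whether a delalloc reservation is being converted.
 */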
static int
xfs_bmap_compute_alignments(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	struct xfs_mount	*mp = args->mp;
	xfs_extlen_t		align = 0; /* minimum allocation alignment */
	int			stripe_align = 0;

	/* stripe alignment for allocation is determined by mount parameters */
	if (mp->m_swidth && xfs_has_swalloc(mp))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (ap->datatype & XFS_ALLOC_USERDATA)
		align = xfs_get_extsz_hint(ap->ip);

	/* Try to align start block to any minimum allocation alignment */
	if (align > 1 && (ap->flags & XFS_BMAPI_EXTSZALIGN))
		args->alignment = align;

	if (align) {
		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
					ap->eof, 0, ap->conv, &ap->offset,
					&ap->length))
			ASSERT(0);
		ASSERT(ap->length);
	}

	/* apply extent size hints if obtained earlier */
	if (align) {
		args->prod = align;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args->prod = 1;
		args->mod = 0;
	} else {
		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	}

	return stripe_align;
}
static void
xfs_bmap_process_allocated_extent(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_fileoff_t		orig_offset,
	xfs_extlen_t		orig_length)
{
	ap->blkno = args->fsbno;
	ap->length = args->len;
	/*
	 * If the extent size hint is active, we tried to round the
	 * caller's allocation request offset down to extsz and the
	 * length up to another extsz boundary.  If we found a free
	 * extent we mapped it in starting at this new offset.  If the
	 * newly mapped space isn't long enough to cover any of the
	 * range of offsets that was originally requested, move the
	 * mapping up so that we can fill as much of the caller's
	 * original request as possible.  Free space is apparently
	 * very fragmented so we're unlikely to be able to satisfy the
	 * hints anyway.
	 */
	if (ap->length <= orig_length)
		ap->offset = orig_offset;
	else if (ap->offset + ap->length < orig_offset + orig_length)
		ap->offset = orig_offset + orig_length - ap->length;
	xfs_bmap_alloc_account(ap);
}
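/*
 * Example of the adjustment above: the caller asked for offset 100,
 * length 8, and the extent size hint widened that to offset 96, length 16.
 * If the allocator returns only 10 blocks, mapping them at 96..105 would
 * leave 106..107 of the original request uncovered, so the mapping is
 * shifted up to offset 98 (108 - 10) and covers the caller's whole
 * 100..107 range instead.
 */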
static int
xfs_bmap_exact_minlen_extent_alloc(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	if (ap->minlen != 1) {
		args->fsbno = NULLFSBLOCK;
		return 0;
	}

	args->alloc_minlen_only = 1;
	args->minlen = args->maxlen = ap->minlen;
	args->total = ap->total;

	/*
	 * Unlike the longest extent available in an AG, we don't track
	 * the length of an AG's shortest extent.
	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
	 * hence we can afford to start traversing from the 0th AG since
	 * we need not be concerned about a drop in performance in
	 * "debug only" code paths.
	 */
	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);

	/*
	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
	 * iteration and then drops args->total to args->minlen, which might be
	 * required to find an allocation for the transaction reservation when
	 * the file system is very full.
	 */
	return xfs_bmap_btalloc_low_space(ap, args);
}
/*
 * If we are not low on available data blocks and we are allocating at
 * EOF, optimise allocation for contiguous file extension and/or stripe
 * alignment of the new extent.
 *
 * NOTE: ap->aeof is only set if the allocation length is >= the
 * stripe unit and the allocation offset is at the end of file.
 */
static int
xfs_bmap_btalloc_at_eof(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen,
	int			stripe_align,
	bool			ag_only)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*caller_pag = args->pag;
	int			error;

	/*
	 * If there are already extents in the file, and xfs_bmap_adjacent() has
	 * given a better blkno, try an exact EOF block allocation to extend the
	 * file as a contiguous extent. If that fails, or it's the first
	 * allocation in a file, just try for a stripe aligned allocation.
	 */
	if (ap->eof) {
		xfs_extlen_t	nextminlen = 0;

		/*
		 * Compute the minlen+alignment for the next case.  Set slop so
		 * that the value of minlen+alignment+slop doesn't go up between
		 * the calls.
		 */
		args->alignment = 1;
		if (blen > stripe_align && blen <= args->maxlen)
			nextminlen = blen - stripe_align;
		else
			nextminlen = args->minlen;
		if (nextminlen + stripe_align > args->minlen + 1)
			args->minalignslop = nextminlen + stripe_align -
					args->minlen - 1;
		else
			args->minalignslop = 0;

		if (!caller_pag)
			args->pag = xfs_perag_get(mp,
					XFS_FSB_TO_AGNO(mp, ap->blkno));
		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
		if (!caller_pag) {
			xfs_perag_put(args->pag);
			args->pag = NULL;
		}
		if (error)
			return error;

		if (args->fsbno != NULLFSBLOCK)
			return 0;
		/*
		 * Exact allocation failed. Reset to try an aligned allocation
		 * according to the original allocation specification.
		 */
		args->alignment = stripe_align;
		args->minlen = nextminlen;
		args->minalignslop = 0;
	} else {
		/*
		 * Adjust minlen to try and preserve alignment if we
		 * can't guarantee an aligned maxlen extent.
		 */
		args->alignment = stripe_align;
		if (blen > args->alignment &&
		    blen <= args->maxlen + args->alignment)
			args->minlen = blen - args->alignment;
		args->minalignslop = 0;
	}

	if (ag_only) {
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	} else {
		args->pag = NULL;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		ASSERT(args->pag == NULL);
		args->pag = caller_pag;
	}
	if (error)
		return error;

	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Allocation failed, so restore the allocation args to their
	 * original non-aligned state so the caller can proceed on allocation
	 * failure as if this function was never called.
	 */
	args->alignment = 1;
	return 0;
}
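/*
 * Example of the slop calculation above: with stripe_align = 8,
 * args->minlen = 4 and a best-length estimate blen = 20 <= maxlen,
 * nextminlen = 20 - 8 = 12, so minalignslop = 12 + 8 - 4 - 1 = 15 extra
 * free blocks are budgeted for the exact-bno attempt.  That guarantees
 * the fallback aligned allocation (minlen 12, alignment 8) cannot need
 * more space than the first attempt already reserved.
 */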
/*
 * We have failed multiple allocation attempts so now are in a low space
 * allocation situation. Try a locality first full filesystem minimum length
 * allocation whilst still maintaining necessary total block reservation
 * requirements.
 *
 * If that fails, we are now critically low on space, so perform a last resort
 * allocation attempt: no reserve, no locality, blocking, minimum length, full
 * filesystem free space scan. We also indicate to future allocations in this
 * transaction that we are critically low on space so they don't waste time on
 * allocation modes that are unlikely to succeed.
 */
static int
xfs_bmap_btalloc_low_space(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	int			error;

	if (args->minlen > ap->minlen) {
		args->minlen = ap->minlen;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	/* Last ditch attempt before failure is declared. */
	args->total = ap->minlen;
	error = xfs_alloc_vextent_first_ag(args, 0);
	if (error)
		return error;
	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	return 0;
}
static int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error = 0;

	error = xfs_filestream_select_ag(ap, args, &blen);
	if (error)
		return error;
	ASSERT(args->pag);

	/*
	 * If we are in low space mode, then optimal allocation will fail so
	 * prepare for minimal allocation and jump to the low space algorithm
	 * immediately.
	 */
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->minlen = ap->minlen;
		ASSERT(args->fsbno == NULLFSBLOCK);
		goto out_low_space;
	}

	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
	if (ap->aeof)
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				true);

	if (!error && args->fsbno == NULLFSBLOCK)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);

out_low_space:
	/*
	 * We are now done with the perag reference for the filestreams
	 * association provided by xfs_filestream_select_ag(). Release it now as
	 * we've either succeeded, had a fatal error or we are out of space and
	 * need to do a full filesystem scan for free space which will take its
	 * own references.
	 */
	xfs_perag_rele(args->pag);
	args->pag = NULL;
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
static int
xfs_bmap_btalloc_best_length(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error;

	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
	if (!xfs_bmap_adjacent(ap))
		ap->eof = false;

	/*
	 * Search for an allocation group with a single extent large enough for
	 * the request.  If one isn't found, then adjust the minimum allocation
	 * size to the largest space found.
	 */
	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
	if (error)
		return error;

	/*
	 * Don't attempt optimal EOF allocation if previous allocations barely
	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
	 * optimal or even aligned allocations in this case, so don't waste time
	 * trying.
	 */
	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				false);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
static int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = {
		.tp		= ap->tp,
		.mp		= mp,
		.fsbno		= NULLFSBLOCK,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minleft	= ap->minleft,
		.wasdel		= ap->wasdel,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= ap->datatype,
		.alignment	= 1,
		.minalignslop	= 0,
	};
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;
	int			stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	stripe_align = xfs_bmap_compute_alignments(ap, &args);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);

	if (unlikely(XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
			xfs_inode_is_filestream(ap->ip))
		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
	else
		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	uint32_t		flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
}
/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	uint32_t		flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   mval[-1].br_state == mval->br_state) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
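/*
 * The three merge branches above coalesce the record just written into
 * mval[-1] when it either refines the same mapping (same br_startoff after
 * an unwritten conversion grew the extent), physically continues a real
 * mapping, or logically continues a delalloc mapping; only when none of
 * those hold does the map array advance and *n grow.
 */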
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	if (WARN_ON_ONCE(!ifp)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
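/*
 * Typical use (illustrative sketch; offset_fsb is a caller-supplied file
 * offset in filesystem blocks): map a single block without allocating.
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, 1, &map, &nmap, 0);
 *
 * Holes are reported with br_startblock == HOLESTARTBLOCK and delalloc
 * reservations with DELAYSTARTBLOCK, so callers must check br_startblock
 * before using the mapping for I/O.
 */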
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			error;

	ASSERT(bma->length > 0);
	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		/*
		 * For the data and COW fork, the first data in the file is
		 * treated differently to all other allocations. For the
		 * attribute fork, we only need to ensure the allocated range
		 * is not on the busy list.
		 */
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
			bma->datatype |= XFS_ALLOC_USERDATA;
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

			if (mp->m_dalign && bma->length >= mp->m_dalign) {
				error = xfs_bmap_isaeof(bma, whichfork);
				if (error)
					return error;
			}
		}
	}

	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
	    XFS_IS_REALTIME_INODE(bma->ip))
		error = xfs_bmap_rtalloc(bma);
	else
		error = xfs_bmap_btalloc(bma);
	if (error)
		return error;
	if (bma->blkno == NULLFSBLOCK)
		return -ENOSPC;

	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
		xfs_bmap_mark_sick(bma->ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip,
				whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur && bma->wasdel)
		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	if (bma->flags & XFS_BMAPI_PREALLOC)
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	uint32_t		flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);

	if (tp && tp->t_highest_agno != NULLAGNUMBER)
		return 0;
	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
}
/*
 * Log whatever the flags say, even if error.  Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged so we
 * don't shutdown when we should.  Don't bother logging extents/btree changes if
 * we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);

	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * Returns 0 on success and places the extent mappings in mval.  nmaps is used
 * as an input/output parameter where the caller specifies the maximum number
 * of mappings that may be returned and xfs_bmapi_write passes back the number
 * of mappings (including existing mappings) it found.
 *
 * Returns a negative error code on failure, including -ENOSPC when it could not
 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
 * delalloc range, but those blocks were before the passed in range.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	uint32_t		flags,		/* XFS_BMAPI_... */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is for currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		goto error0;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			         (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * be careful and do the min() using the larger type to
			 * avoid overflows.
			 */
			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);

			if (wasdelay) {
				bma.offset = bma.got.br_startoff;
				bma.length = XFS_FILBLKS_MIN(bma.length,
					bma.got.br_blockcount -
					(bno - bma.got.br_startoff));
			} else {
				if (!eof)
					bma.length = XFS_FILBLKS_MIN(bma.length,
						bma.got.br_startoff - bno);
			}

			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error) {
				/*
				 * If we already allocated space in a previous
				 * iteration return what we got so far when
				 * running out of space.
				 */
				if (error == -ENOSPC && bma.nallocs)
					break;
				goto error0;
			}

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK)
				xfs_refcount_alloc_cow_extent(tp,
						XFS_IS_REALTIME_INODE(ip),
						bma.blkno, bma.length);
		}

		/* Deal with the allocated space we found.  */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
			end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}
	*nmap = n;

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto error0;

	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
	xfs_bmapi_finish(&bma, whichfork, 0);
	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
		orig_nmap, *nmap);

	/*
	 * When converting delayed allocations, xfs_bmapi_allocate ignores
	 * the passed in bno and always converts from the start of the found
	 * delalloc extent.
	 *
	 * To avoid a successful return with *nmap set to 0, return the magic
	 * -ENOSR error code for this particular case so that the caller can
	 * handle it.
	 */
	if (!*nmap) {
		ASSERT(bma.nallocs >= *nmap);
		return -ENOSR;
	}
	return 0;
error0:
	xfs_bmapi_finish(&bma, whichfork, error);
	return error;
}
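/*
 * Typical use (illustrative sketch; offset_fsb, count_fsb and resblks are
 * caller-supplied values): allocate the blocks backing a write.
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
 *			resblks, &map, &nmap);
 *
 * On success nmap holds the number of mappings filled in; -ENOSPC means
 * nothing could be allocated, while -ENOSR means blocks were allocated for
 * an earlier delalloc conversion but none of them cover the requested
 * range.
 */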
/*
 * Convert an existing delalloc extent to real blocks based on file offset. This
 * attempts to allocate the entire delalloc extent and may require multiple
 * invocations to allocate the target offset if a large enough physical extent
 * is not available.
 */
static int
xfs_bmapi_convert_one_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmalloca	bma = { NULL };
	uint16_t		flags = 0;
	struct xfs_trans	*tp;
	int			error;

	if (whichfork == XFS_COW_FORK)
		flags |= IOMAP_F_SHARED;

	/*
	 * Space for the extent and indirect blocks was reserved when the
	 * delalloc extent was created so there's no need to do so here.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
				XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, whichfork,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;

	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
	    bma.got.br_startoff > offset_fsb) {
		/*
		 * No extent found in the range we are trying to convert.  This
		 * should only happen for the COW fork, where another thread
		 * might have moved the extent to the data fork in the meantime.
		 */
		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
		error = -EAGAIN;
		goto out_trans_cancel;
	}

	/*
	 * If we find a real extent here we raced with another thread converting
	 * the extent.  Just return the real extent at this offset.
	 */
	if (!isnullstartblock(bma.got.br_startblock)) {
		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
				xfs_iomap_inode_sequence(ip, flags));
		if (seq)
			*seq = READ_ONCE(ifp->if_seq);
		goto out_trans_cancel;
	}

	bma.tp = tp;
	bma.ip = ip;
	bma.wasdel = true;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	/*
	 * Always allocate convert from the start of the delalloc extent even if
	 * that is outside the passed in range to create large contiguous
	 * extents on disk.
	 */
	bma.offset = bma.got.br_startoff;
	bma.length = bma.got.br_blockcount;

	/*
	 * When we're converting the delalloc reservations backing dirty pages
	 * in the page cache, we must be careful about how we create the new
	 * extents:
	 *
	 * New CoW fork extents are created unwritten, turned into real extents
	 * when we're about to write the data to disk, and mapped into the data
	 * fork after the write finishes.  End of story.
	 *
	 * New data fork extents must be mapped in as unwritten and converted
	 * to real extents after the write succeeds to avoid exposing stale
	 * disk contents if we crash.
	 */
	bma.flags = XFS_BMAPI_PREALLOC;
	if (whichfork == XFS_COW_FORK)
		bma.flags |= XFS_BMAPI_COWFORK;

	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;

	error = xfs_bmapi_allocate(&bma);
	if (error)
		goto out_finish;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
	XFS_STATS_INC(mp, xs_xstrat_quick);

	ASSERT(!isnullstartblock(bma.got.br_startblock));
	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
			xfs_iomap_inode_sequence(ip, flags));
	if (seq)
		*seq = READ_ONCE(ifp->if_seq);

	if (whichfork == XFS_COW_FORK)
		xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
				bma.blkno, bma.length);

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto out_finish;

	xfs_bmapi_finish(&bma, whichfork, 0);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_finish:
	xfs_bmapi_finish(&bma, whichfork, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in iomap.
 */
int
xfs_bmapi_convert_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into iomap.  Allocate in a loop because it may
	 * take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
				iomap, seq);
		if (error)
			return error;
	} while (iomap->offset + iomap->length <= offset);

	return 0;
}
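
/*
 * Worked example (illustrative numbers only, not from the original source):
 * if a single delalloc extent backs file offsets [0, 16MB) and free space
 * fragmentation limits each allocation to 4MB, a conversion targeting
 * offset 10MB loops three times.  The first pass returns an iomap for
 * [0, 4MB), the second for [4MB, 8MB), and the third for [8MB, 12MB); only
 * then does iomap->offset + iomap->length exceed the target offset and the
 * loop above terminates.
 */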
int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			whichfork = xfs_bmapi_whichfork(flags);
	int			logflags = 0, error;

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
			   XFS_BMAPI_NORMAP)));
	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_nblocks += len;
	ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	if (flags & XFS_BMAPI_PREALLOC)
		got.br_state = XFS_EXT_UNWRITTEN;
	else
		got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
			&cur, &got, &logflags, flags);
	if (error)
		goto error0;
	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);

error0:
	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents.  If necessary, steal
 * available blocks from a deleted extent to make up a reservation deficiency
 * (e.g., if ores == 1).  The number of stolen blocks is returned.  The
 * availability and subsequent accounting of stolen blocks is the
 * responsibility of the caller.
 */
static void
xfs_bmap_split_indlen(
	xfs_filblks_t		ores,		/* original res. */
	xfs_filblks_t		*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t		*indlen2)	/* ext2 worst indlen */
{
	xfs_filblks_t		len1 = *indlen1;
	xfs_filblks_t		len2 = *indlen2;
	xfs_filblks_t		nres = len1 + len2; /* new total res. */
	xfs_filblks_t		resfactor;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that
	 * one of the two extents is left with nothing when extents are
	 * repeatedly split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent.  If one of the two
	 * reservations is zero, we want to make sure that one gets a block
	 * first.  The loop below starts with len1, so hand len2 a block right
	 * off the bat if it is zero.
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
}
void
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del,
	uint32_t		bflags)	/* bmapi flags */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	uint64_t		fdblocks;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = xfs_ifork_is_realtime(ip, whichfork);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
	ip->i_delayed_blks -= del->br_blockcount;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two
		 * new extents.  Steal blocks from the deleted extent if
		 * necessary.  Stealing blocks simply fudges the fdblocks
		 * accounting below.  Warn if either of the new indlen
		 * reservations is zero as this can lead to delalloc problems.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);

		/*
		 * Steal as many blocks as we can to try and satisfy the worst
		 * case indlen for both new extents.
		 *
		 * However, we can't just steal reservations from the data
		 * blocks if this is an RT inode as the data and metadata
		 * blocks come from different pools.  We'll have to live with
		 * under-filled indirect reservation in this case.
		 */
		da_new = got_indlen + new_indlen;
		if (da_new > da_old && !isrt) {
			stolen = XFS_FILBLKS_MIN(da_new - da_old,
						 del->br_blockcount);
			da_old += stolen;
		}
		if (da_new > da_old)
			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
		da_new = got_indlen + new_indlen;

		got->br_startblock = nullstartblock((int)got_indlen);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);

		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	fdblocks = da_diff;

	if (bflags & XFS_BMAPI_REMAP) {
		;
	} else if (isrt) {
		xfs_rtbxlen_t	rtxlen;

		rtxlen = xfs_blen_to_rtbxlen(mp, del->br_blockcount);
		if (xfs_is_zoned_inode(ip))
			xfs_zoned_add_available(mp, rtxlen);
		xfs_add_frextents(mp, rtxlen);
	} else {
		fdblocks += del->br_blockcount;
	}

	xfs_add_fdblocks(mp, fdblocks);
	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
}
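
/*
 * Worked example (hypothetical extents): with got covering file offsets
 * [0, 10) and del covering [0, 4), only BMAP_LEFT_FILLING is set and got
 * shrinks to [4, 10).  If del instead covers [4, 7), neither flag is set,
 * so the "middle" case above splits got into [0, 4) and [7, 10) and divides
 * the original indirect-block reservation between the two pieces via
 * xfs_bmap_split_indlen().
 */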
void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	uint32_t		state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}
static int
xfs_bmap_free_rtblocks(
	struct xfs_trans	*tp,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_rtgroup	*rtg;
	int			error;

	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
	if (!rtg)
		return -EIO;

	/*
	 * Ensure the bitmap and summary inodes are locked and joined to the
	 * transaction before modifying them.
	 */
	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
	}

	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
			del->br_blockcount);
	xfs_rtgroup_rele(rtg);
	return error;
}
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	uint32_t		bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock = 0; /* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			error = 0;	/* error return value */
	struct xfs_bmbt_irec	got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	uint			qfield;	/* quota field to update */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;

	/*
	 * If it's the case where the directory code is running with no block
	 * reservation, and the deleted block is in the middle of its extent,
	 * and the resulting insert of an extent would cause transformation to
	 * btree format, then reject it.  The calling code will then swap blocks
	 * around instead.  We have to do this now, rather than waiting for the
	 * conversion to btree format, since the transaction will be dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	*logflagsp = XFS_ILOG_CORE;
	if (xfs_ifork_is_realtime(ip, whichfork))
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	else
		qfield = XFS_TRANS_DQ_BCOUNT;
	nblks = del->br_blockcount;

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		*logflagsp |= XFS_ILOG_CORE;
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		*logflagsp |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				return error;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				return error;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				return error;
			/*
			 * If we get no-space back from btree insert, it tried
			 * a split, and we have a zero block reservation.  Fix
			 * up our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					return error;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					xfs_btree_mark_sick(cur);
					return -EFSCORRUPTED;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					return error;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				*logflagsp = 0;
				return -ENOSPC;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				return -EFSCORRUPTED;
			}
		} else
			*logflagsp |= xfs_ilog_fext(whichfork);

		ifp->if_nextents++;
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (!(bflags & XFS_BMAPI_REMAP)) {
		bool	isrt = xfs_ifork_is_realtime(ip, whichfork);

		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			xfs_refcount_decrease_extent(tp, isrt, del);
		} else if (isrt && !xfs_has_rtgroups(mp)) {
			error = xfs_bmap_free_rtblocks(tp, del);
		} else {
			unsigned int	efi_flags = 0;

			if ((bflags & XFS_BMAPI_NODISCARD) ||
			    del->br_state == XFS_EXT_UNWRITTEN)
				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;

			/*
			 * Historically, we did not use EFIs to free realtime
			 * extents.  However, when reverse mapping is enabled,
			 * we must maintain the same order of operations as the
			 * data device, which is: Remove the file mapping,
			 * remove the reverse mapping, and then free the
			 * blocks.  Reflink for realtime volumes requires the
			 * same sort of ordering.  Both features rely on
			 * rtgroups, so let's gate rt EFI usage on rtgroups.
			 */
			if (isrt)
				efi_flags |= XFS_FREE_EXTENT_REALTIME;

			error = xfs_free_extent_later(tp, del->br_startblock,
					del->br_blockcount, NULL,
					XFS_AG_RESV_NONE, efi_flags);
		}
		if (error)
			return error;
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_nblocks -= nblks;

	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	return 0;
}
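
/*
 * Worked example (hypothetical): a directory operation running with
 * tp->t_blk_res == 0 asks to remove [5, 6) from an extent covering [0, 10)
 * while the fork already holds the maximum number of inline extent records.
 * The deletion would insert a second record and force extents-to-btree
 * conversion, so the early check above returns -ENOSPC before the
 * transaction is dirtied and the caller falls back to swapping blocks
 * around instead.
 */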
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *rlen reflects the amount remaining.
 */
int
__xfs_bunmapi(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		start,		/* first file offset deleted */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	uint32_t		flags,		/* misc flags */
	xfs_extnum_t		nexts)		/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}
	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = xfs_ifork_is_realtime(ip, whichfork);
	end = start + len;

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	} else
		cur = NULL;

	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		if (!isrt || (flags & XFS_BMAPI_REMAP))
			goto delete;

		mod = xfs_rtb_to_rtxoff(mp,
				del.br_startblock + del.br_blockcount);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}

		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
					&del, flags);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
			if (error)
				goto error0;
		}

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/* Unmap a range of a file. */
int
xfs_bunmapi(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}
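
/*
 * Usage sketch (not from the original source): a caller that must remove an
 * entire range typically invokes xfs_bunmapi() in a loop, since a single
 * call may stop early once nexts extents have been removed:
 *
 *	int done = 0;
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, bno, len, flags, nexts, &done);
 *		if (error)
 *			break;
 *		(finish deferred frees, roll the transaction, retry)
 *	}
 *
 * xfs_bunmapi_range() later in this file implements this pattern on top of
 * __xfs_bunmapi() for truncation.
 */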
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
static bool
xfs_bmse_can_merge(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
		return false;

	return true;
}
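
/*
 * Worked example (hypothetical extents): left covers file offsets [10, 15)
 * at disk blocks [100, 105) and got covers [17, 20) at [105, 108).  With
 * shift = 2 the shifted startoff is 15, which lines up exactly with the end
 * of left both in-file and on-disk, the states match, and the combined
 * length is far below XFS_MAX_BMBT_EXTLEN, so the two extents can be merged
 * instead of shifted.
 */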
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file.  If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}
static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}
int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
				offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
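
/*
 * Worked example: BMBT_STARTOFF_MASK is the largest file offset an on-disk
 * bmbt record can encode.  If the last extent starts at
 * BMBT_STARTOFF_MASK - 10 and the requested right shift is 20, the masked
 * sum wraps and compares below the current startoff, so the check above
 * fails the operation with -EINVAL instead of letting the offset silently
 * overflow.
 */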
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We
		 * should never find mergeable extents in this scenario.
		 * Check anyway and warn if we encounter two extents that
		 * could be one.
		 */
		if (xfs_bmse_can_merge(ip, whichfork, &got, &next,
				offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
/*
 * Splits an extent into two extents at split_fsb block such that it is the
 * first block of the current_ext.  @ext is a target extent to be split.
 * @split_fsb is a block where the extent is split.  If split_fsb lies in a
 * hole or at the first block of an extent, just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
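
/*
 * Worked example (hypothetical extent): splitting at split_fsb = 14 an
 * extent that maps file offsets [10, 20) to disk blocks [100, 110) yields
 * gotblkcnt = 4; got is trimmed to [10, 14) at [100, 104) and the new
 * record maps [14, 20) to [104, 110), preserving br_state across both
 * halves.
 */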
/* Record a bmap intent. */
static void
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
	    bmap->br_startblock == HOLESTARTBLOCK ||
	    bmap->br_startblock == DELAYSTARTBLOCK)
		return;

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_defer_add(tp, bi);
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
}
/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	struct xfs_bmbt_irec	*bmap = &bi->bi_bmap;
	int			error = 0;
	int			flags = 0;

	if (bi->bi_whichfork == XFS_ATTR_FORK)
		flags |= XFS_BMAPI_ATTRFORK;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(bi);

	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
			flags |= XFS_BMAPI_PREALLOC;
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock,
				flags);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
				1);
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
		error = -EFSCORRUPTED;
	}

	return error;
}
/* Check that an extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent_raw(
	struct xfs_mount	*mp,
	bool			rtfile,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (rtfile && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}
/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent_raw(ip->i_mount,
			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
}
/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

/*
 * Unmap every extent in part of an inode's fork.  We don't do any higher level
 * invalidation work at all.
 */
int
xfs_bunmapi_range(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	uint32_t		flags,
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		endoff)
{
	xfs_filblks_t		unmap_len = endoff - startoff + 1;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	while (unmap_len > 0) {
		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
				XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(tpp);
		if (error)
			goto out;
	}
out:
	return error;
}
struct xfs_bmap_query_range {
	xfs_bmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_bmap_query_range_helper(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_rec *rec,
	void			*priv)
{
	struct xfs_bmap_query_range	*query = priv;
	struct xfs_bmbt_irec		irec;
	xfs_failaddr_t			fa;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
			&irec);
	if (fa) {
		xfs_btree_mark_sick(cur);
		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
				cur->bc_ino.whichfork, fa, &irec);
	}

	return query->fn(cur, &irec, query->priv);
}

/* Find all bmaps. */
int
xfs_bmap_query_all(
	struct xfs_btree_cur	*cur,
	xfs_bmap_query_range_fn	fn,
	void			*priv)
{
	struct xfs_bmap_query_range	query = {
		.priv			= priv,
		.fn			= fn,
	};

	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}
/* Helper function to extract extent size hint from inode */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (!xfs_is_always_cow_inode(ip) &&
	    (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip) &&
	    ip->i_mount->m_sb.sb_rextsize > 1)
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
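
/*
 * Worked example (hypothetical values): on a realtime inode without
 * XFS_DIFLAG_EXTSIZE and with sb_rextsize = 16, the hint is 16 blocks; on a
 * data-device inode with XFS_DIFLAG_EXTSIZE set and i_extsize = 64, the
 * hint is 64; on an always-cow data-device inode the hint is 0 because
 * aligned overwrites would be COWed anyway.
 */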
/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	if (XFS_IS_REALTIME_INODE(ip)) {
		b = 0;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			b = ip->i_extsize;
	} else {
		b = xfs_get_extsz_hint(ip);
	}

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
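
/*
 * Worked example (hypothetical values): with XFS_DIFLAG2_COWEXTSIZE set and
 * i_cowextsize = 32 on a data-device inode whose regular extent size hint
 * is 64, the function returns max(32, 64) = 64; with neither hint set it
 * falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */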