// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_errortag.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota_defs.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_health.h"
#include "defer_item.h"
#include "xfs_symlink_remote.h"
#include "xfs_inode_util.h"
#include "xfs_rtgroup.h"
struct kmem_cache		*xfs_bmap_intent_cache;
/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = xfs_bmdr_space_calc(MINDBTPTRS);
	else
		sz = xfs_bmdr_space_calc(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
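
/*
 * Illustrative sizing (hypothetical numbers, not any particular geometry):
 * with maxleafents = 2^48 and minleafrecs = minnoderecs = 125, the loop
 * above needs howmany_64(2^48, 125) blocks at the leaf level and then
 * divides the block count by 125 per node level, so the computed height
 * stays below ten levels even for the largest possible extent counts.
 */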
unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
	return xfs_bmdr_space_calc(6 * MINABTPTRS);
}
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
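
/*
 * Seeding bc_rec.b with an all-zeroes key and searching with
 * XFS_LOOKUP_GE positions the cursor at the lowest-keyed record in the
 * tree, i.e. the first mapping in the fork, since no valid key can sort
 * below zero.
 */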
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
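
/*
 * Note that the two predicates above use complementary comparisons
 * (">" versus "<=") against XFS_IFORK_MAXEXT(), so a fork always
 * satisfies at most one of them and a conversion in one direction never
 * immediately qualifies the fork for the opposite conversion.
 */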
/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	struct xfs_inode	*ip,	/* incore inode pointer */
	xfs_filblks_t		len)	/* delayed extent length */
{
	struct xfs_mount	*mp = ip->i_mount;
	int			maxrecs = mp->m_bmap_dmxr[0];
	int			level;
	xfs_filblks_t		rval;

	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
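
/*
 * Illustrative worst case (hypothetical numbers): for len = 64 with
 * m_bmap_dmxr[0] = 8 and m_bmap_dmxr[1] = 4, the loop charges
 * howmany(64, 8) = 8 leaf blocks, then howmany(8, 4) = 2 node blocks,
 * then 1 block, and returns early, padding the remaining tree height
 * with one block per level.
 */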
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}
/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	struct xfs_inode	*ip,
	int			whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}
static int
xfs_bmap_read_buf(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp)		/* buffer for fsbno */
{
	struct xfs_buf		*bp;		/* return value */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
			&xfs_bmbt_buf_ops);
	if (!error) {
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		*bpp = bp;
	}
	return error;
}
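
/*
 * Minimal usage sketch (error handling elided): callers walking the bmbt
 * by hand read each child block with
 *
 *	error = xfs_bmap_read_buf(mp, tp, fsbno, &bp);
 *
 * and get back a buffer that has been checked by &xfs_bmbt_buf_ops and
 * tagged with XFS_BMAP_BTREE_REF so reclaim treats it as btree metadata.
 */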
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	xfs_mount_t		*mp,	/* mount point structure */
	int			root,	/* flag for being a root block */
	short			sz)	/* root block size (if root) */
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = xfs_bmbt_key_addr(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
		else
			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
			else
				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = xfs_bmbt_rec_addr(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
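
/*
 * Note that the leaf walk above keeps at most one buffer alive at a time:
 * any block read here (rather than found attached to the cursor) is
 * released before following bb_rightsib, so the check runs in constant
 * memory no matter how large the bmap btree is.
 */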
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);

	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE, 0);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
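
/*
 * After a successful return the fork is back in extents format: the
 * single leaf block has been queued for freeing via
 * xfs_free_extent_later(), the quota block count is adjusted, and
 * *logflagsp tells the caller to log the inode core plus the converted
 * fork.
 */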
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	block = xfs_bmap_broot_realloc(ip, whichfork, 1);

	/*
	 * Fill in the root.
	 */
	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	if (wasdel)
		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_bmap.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = xfs_bmbt_key_addr(mp, block, 1);
	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
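
/*
 * The error labels above unwind in reverse order of what succeeded:
 * out_unreserve_dquot backs out the quota reservation taken for the new
 * child block, and out_root_realloc shrinks the incore root back to zero
 * size and restores XFS_DINODE_FMT_EXTENTS, leaving the fork as it was
 * on entry.
 */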
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
static void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_data = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
int
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp, void *priv),
	void		*priv)
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp, priv);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_data = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
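
/*
 * Caller contract, in brief (see xfs_bmap_add_attrfork_local() below for
 * a real user): init_fn is handed the freshly allocated buffer and must
 * copy the inline data into it and log it, because only the caller knows
 * the on-disk remote format; this function then swaps the fork over to a
 * single-extent map at file offset zero.
 */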
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		dargs.owner = ip->i_ino;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
				XFS_DATA_FORK, xfs_symlink_local_to_remote,
				NULL);

	/* should only be called for types that support local format data */
	ASSERT(0);
	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
	return -EFSCORRUPTED;
}
/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
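
/*
 * *version is only bumped to 2 when attr2 is enabled and
 * xfs_attr_shortform_bytesfit() found a usable non-default fork offset;
 * xfs_bmap_add_attrfork() uses that to decide whether the superblock
 * ATTR2 feature bit must be set and logged.
 */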
/*
 * Convert inode from non-attributed to attributed.  Caller must hold the
 * ILOCK_EXCL and the file cannot have an attr fork.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (!xfs_is_metadir_inode(ip))
		ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	ASSERT(!xfs_inode_has_attr_fork(ip));

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		return error;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		return error;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	return 0;
}
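
/*
 * Note the ordering above: the fork offset is fixed up and the inode
 * core logged before the per-format conversion runs, and the superblock
 * feature bits are only upgraded (under m_sb_lock) once the inode itself
 * has been fully converted within the same transaction.
 */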
/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};
int
xfs_bmap_complain_bad_rec(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_failaddr_t			fa,
	const struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount		*mp = ip->i_mount;
	const char			*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = xfs_bmbt_rec_addr(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			xfs_bmap_mark_sick(ip, whichfork);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	if (xfs_metadata_is_sick(error))
		xfs_bmap_mark_sick(ip, whichfork);
	xfs_iext_destroy(ifp);
	return error;
}
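
/*
 * The smp_store_release() above pairs with the smp_load_acquire() in
 * xfs_need_iread_extents(): a thread that observes if_needextents == 0
 * is guaranteed to also observe the fully populated incore extent tree.
 */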
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
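
/*
 * Worked example (hypothetical fork contents, search starting at offset
 * 0): with mappings at [0, 10) and [12, 20) and len = 4, the two-block
 * hole at offset 10 is rejected, the loop runs off the end of the fork,
 * and *first_unused is set to 20, the first block past EOF.
 */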
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}
int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
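
/*
 * The isnullstartblock() clause above treats an allocation inside the
 * trailing delalloc extent as an append: the write that created that
 * delayed allocation is effectively extending the file, so it still
 * gets EOF-style stripe alignment treatment.
 */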
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

static inline bool
xfs_bmap_same_rtgroup(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,
	struct xfs_bmbt_irec	*right)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) {
		if (xfs_rtb_to_rgno(mp, left->br_startblock) !=
		    xfs_rtb_to_rgno(mp, right->br_startblock))
			return false;
	}

	return true;
}
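
/*
 * Keeping both halves of a prospective merge inside one realtime group
 * matters because realtime space is managed per-rtgroup and a mapping
 * must not span an rtgroup boundary; for non-realtime forks or
 * filesystems without rtgroups the helper always says yes and merging is
 * governed by the other contiguity checks alone.
 */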
/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new))
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN) &&
	    xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT))
		state |= BMAP_RIGHT_CONTIG;
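
	/*
	 * The four flags tested below span sixteen combinations, but since
	 * the delalloc PREV record always covers "new", contiguity on a side
	 * implies that side is also filling; the contiguous-without-filling
	 * combinations are listed at the bottom of the switch and assert as
	 * impossible.
	 */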
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		ASSERT(da_new <= da_old);
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;
1730 case BMAP_LEFT_FILLING
:
1732 * Filling in the first part of a previous delayed allocation.
1733 * The left neighbor is not contiguous.
1735 xfs_iext_update_extent(bma
->ip
, state
, &bma
->icur
, new);
1738 if (bma
->cur
== NULL
)
1739 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
1741 rval
= XFS_ILOG_CORE
;
1742 error
= xfs_bmbt_lookup_eq(bma
->cur
, new, &i
);
1745 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
1746 xfs_btree_mark_sick(bma
->cur
);
1747 error
= -EFSCORRUPTED
;
1750 error
= xfs_btree_insert(bma
->cur
, &i
);
1753 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
1754 xfs_btree_mark_sick(bma
->cur
);
1755 error
= -EFSCORRUPTED
;
1760 if (xfs_bmap_needs_btree(bma
->ip
, whichfork
)) {
1761 error
= xfs_bmap_extents_to_btree(bma
->tp
, bma
->ip
,
1762 &bma
->cur
, 1, &tmp_rval
, whichfork
);
1768 temp
= PREV
.br_blockcount
- new->br_blockcount
;
1769 da_new
= XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma
->ip
, temp
),
1770 startblockval(PREV
.br_startblock
) -
1771 (bma
->cur
? bma
->cur
->bc_bmap
.allocated
: 0));
1773 PREV
.br_startoff
= new_endoff
;
1774 PREV
.br_blockcount
= temp
;
1775 PREV
.br_startblock
= nullstartblock(da_new
);
1776 xfs_iext_next(ifp
, &bma
->icur
);
1777 xfs_iext_insert(bma
->ip
, &bma
->icur
, &PREV
, state
);
1778 xfs_iext_prev(ifp
, &bma
->icur
);
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		ASSERT(da_new <= da_old);
		break;
1821 case BMAP_RIGHT_FILLING
:
1823 * Filling in the last part of a previous delayed allocation.
1824 * The right neighbor is not contiguous.
1826 xfs_iext_update_extent(bma
->ip
, state
, &bma
->icur
, new);
1829 if (bma
->cur
== NULL
)
1830 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
1832 rval
= XFS_ILOG_CORE
;
1833 error
= xfs_bmbt_lookup_eq(bma
->cur
, new, &i
);
1836 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
1837 xfs_btree_mark_sick(bma
->cur
);
1838 error
= -EFSCORRUPTED
;
1841 error
= xfs_btree_insert(bma
->cur
, &i
);
1844 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
1845 xfs_btree_mark_sick(bma
->cur
);
1846 error
= -EFSCORRUPTED
;
1851 if (xfs_bmap_needs_btree(bma
->ip
, whichfork
)) {
1852 error
= xfs_bmap_extents_to_btree(bma
->tp
, bma
->ip
,
1853 &bma
->cur
, 1, &tmp_rval
, whichfork
);
1859 temp
= PREV
.br_blockcount
- new->br_blockcount
;
1860 da_new
= XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma
->ip
, temp
),
1861 startblockval(PREV
.br_startblock
) -
1862 (bma
->cur
? bma
->cur
->bc_bmap
.allocated
: 0));
1864 PREV
.br_startblock
= nullstartblock(da_new
);
1865 PREV
.br_blockcount
= temp
;
1866 xfs_iext_insert(bma
->ip
, &bma
->icur
, &PREV
, state
);
1867 xfs_iext_next(ifp
, &bma
->icur
);
1868 ASSERT(da_new
<= da_old
);
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_bmap.allocated;
		bma->cur->bc_bmap.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new < da_old)
		xfs_add_fdblocks(mp, da_old - da_new);
	else if (da_new > da_old)
		error = xfs_dec_fdblocks(mp, da_new - da_old, true);

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
}
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    xfs_bmap_same_rtgroup(ip, whichfork, &LEFT, new))
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN) &&
	    xfs_bmap_same_rtgroup(ip, whichfork, new, &RIGHT))
		state |= BMAP_RIGHT_CONTIG;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
2213 case BMAP_LEFT_FILLING
| BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
2215 * Setting all of a previous oldext extent to newext.
2216 * The right neighbor is contiguous, the left is not.
2218 PREV
.br_blockcount
+= RIGHT
.br_blockcount
;
2219 PREV
.br_state
= new->br_state
;
2221 xfs_iext_next(ifp
, icur
);
2222 xfs_iext_remove(ip
, icur
, state
);
2223 xfs_iext_prev(ifp
, icur
);
2224 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2228 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2230 rval
= XFS_ILOG_CORE
;
2231 error
= xfs_bmbt_lookup_eq(cur
, &RIGHT
, &i
);
2234 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2235 xfs_btree_mark_sick(cur
);
2236 error
= -EFSCORRUPTED
;
2239 if ((error
= xfs_btree_delete(cur
, &i
)))
2241 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2242 xfs_btree_mark_sick(cur
);
2243 error
= -EFSCORRUPTED
;
2246 if ((error
= xfs_btree_decrement(cur
, 0, &i
)))
2248 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2249 xfs_btree_mark_sick(cur
);
2250 error
= -EFSCORRUPTED
;
2253 error
= xfs_bmbt_update(cur
, &PREV
);
2259 case BMAP_LEFT_FILLING
| BMAP_RIGHT_FILLING
:
2261 * Setting all of a previous oldext extent to newext.
2262 * Neither the left nor right neighbors are contiguous with
2265 PREV
.br_state
= new->br_state
;
2266 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2269 rval
= XFS_ILOG_DEXT
;
2272 error
= xfs_bmbt_lookup_eq(cur
, new, &i
);
2275 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2276 xfs_btree_mark_sick(cur
);
2277 error
= -EFSCORRUPTED
;
2280 error
= xfs_bmbt_update(cur
, &PREV
);
2286 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
:
2288 * Setting the first part of a previous oldext extent to newext.
2289 * The left neighbor is contiguous.
2291 LEFT
.br_blockcount
+= new->br_blockcount
;
2294 PREV
.br_startoff
+= new->br_blockcount
;
2295 PREV
.br_startblock
+= new->br_blockcount
;
2296 PREV
.br_blockcount
-= new->br_blockcount
;
2298 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2299 xfs_iext_prev(ifp
, icur
);
2300 xfs_iext_update_extent(ip
, state
, icur
, &LEFT
);
2303 rval
= XFS_ILOG_DEXT
;
2306 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2309 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2310 xfs_btree_mark_sick(cur
);
2311 error
= -EFSCORRUPTED
;
2314 error
= xfs_bmbt_update(cur
, &PREV
);
2317 error
= xfs_btree_decrement(cur
, 0, &i
);
2320 error
= xfs_bmbt_update(cur
, &LEFT
);
2326 case BMAP_LEFT_FILLING
:
2328 * Setting the first part of a previous oldext extent to newext.
2329 * The left neighbor is not contiguous.
2332 PREV
.br_startoff
+= new->br_blockcount
;
2333 PREV
.br_startblock
+= new->br_blockcount
;
2334 PREV
.br_blockcount
-= new->br_blockcount
;
2336 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2337 xfs_iext_insert(ip
, icur
, new, state
);
2341 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2343 rval
= XFS_ILOG_CORE
;
2344 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2347 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2348 xfs_btree_mark_sick(cur
);
2349 error
= -EFSCORRUPTED
;
2352 error
= xfs_bmbt_update(cur
, &PREV
);
2355 cur
->bc_rec
.b
= *new;
2356 if ((error
= xfs_btree_insert(cur
, &i
)))
2358 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2359 xfs_btree_mark_sick(cur
);
2360 error
= -EFSCORRUPTED
;
2366 case BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
2368 * Setting the last part of a previous oldext extent to newext.
2369 * The right neighbor is contiguous with the new allocation.
2372 PREV
.br_blockcount
-= new->br_blockcount
;
2374 RIGHT
.br_startoff
= new->br_startoff
;
2375 RIGHT
.br_startblock
= new->br_startblock
;
2376 RIGHT
.br_blockcount
+= new->br_blockcount
;
2378 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2379 xfs_iext_next(ifp
, icur
);
2380 xfs_iext_update_extent(ip
, state
, icur
, &RIGHT
);
2383 rval
= XFS_ILOG_DEXT
;
2386 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2389 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2390 xfs_btree_mark_sick(cur
);
2391 error
= -EFSCORRUPTED
;
2394 error
= xfs_bmbt_update(cur
, &PREV
);
2397 error
= xfs_btree_increment(cur
, 0, &i
);
2400 error
= xfs_bmbt_update(cur
, &RIGHT
);
2406 case BMAP_RIGHT_FILLING
:
2408 * Setting the last part of a previous oldext extent to newext.
2409 * The right neighbor is not contiguous.
2412 PREV
.br_blockcount
-= new->br_blockcount
;
2414 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2415 xfs_iext_next(ifp
, icur
);
2416 xfs_iext_insert(ip
, icur
, new, state
);
2420 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2422 rval
= XFS_ILOG_CORE
;
2423 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2426 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2427 xfs_btree_mark_sick(cur
);
2428 error
= -EFSCORRUPTED
;
2431 error
= xfs_bmbt_update(cur
, &PREV
);
2434 error
= xfs_bmbt_lookup_eq(cur
, new, &i
);
2437 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
2438 xfs_btree_mark_sick(cur
);
2439 error
= -EFSCORRUPTED
;
2442 if ((error
= xfs_btree_insert(cur
, &i
)))
2444 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2445 xfs_btree_mark_sick(cur
);
2446 error
= -EFSCORRUPTED
;
2454 * Setting the middle part of a previous oldext extent to
2455 * newext. Contiguity is impossible here.
2456 * One extent becomes three extents.
2459 PREV
.br_blockcount
= new->br_startoff
- PREV
.br_startoff
;
2462 r
[1].br_startoff
= new_endoff
;
2463 r
[1].br_blockcount
=
2464 old
.br_startoff
+ old
.br_blockcount
- new_endoff
;
2465 r
[1].br_startblock
= new->br_startblock
+ new->br_blockcount
;
2466 r
[1].br_state
= PREV
.br_state
;
2468 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2469 xfs_iext_next(ifp
, icur
);
2470 xfs_iext_insert(ip
, icur
, &r
[1], state
);
2471 xfs_iext_insert(ip
, icur
, &r
[0], state
);
2472 ifp
->if_nextents
+= 2;
2475 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2477 rval
= XFS_ILOG_CORE
;
2478 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2481 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2482 xfs_btree_mark_sick(cur
);
2483 error
= -EFSCORRUPTED
;
2486 /* new right extent - oldext */
2487 error
= xfs_bmbt_update(cur
, &r
[1]);
2490 /* new left extent - oldext */
2491 cur
->bc_rec
.b
= PREV
;
2492 if ((error
= xfs_btree_insert(cur
, &i
)))
2494 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2495 xfs_btree_mark_sick(cur
);
2496 error
= -EFSCORRUPTED
;
2500 * Reset the cursor to the position of the new extent
2501 * we are about to insert as we can't trust it after
2502 * the previous insert.
2504 error
= xfs_bmbt_lookup_eq(cur
, new, &i
);
2507 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
2508 xfs_btree_mark_sick(cur
);
2509 error
= -EFSCORRUPTED
;
2512 /* new middle extent - newext */
2513 if ((error
= xfs_btree_insert(cur
, &i
)))
2515 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2516 xfs_btree_mark_sick(cur
);
2517 error
= -EFSCORRUPTED
;
2523 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2524 case BMAP_RIGHT_FILLING
| BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2525 case BMAP_LEFT_FILLING
| BMAP_RIGHT_CONTIG
:
2526 case BMAP_RIGHT_FILLING
| BMAP_LEFT_CONTIG
:
2527 case BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2528 case BMAP_LEFT_CONTIG
:
2529 case BMAP_RIGHT_CONTIG
:
2531 * These cases are all impossible.
2536 /* update reverse mappings */
2537 xfs_rmap_convert_extent(mp
, tp
, ip
, whichfork
, new);
2539 /* convert to a btree if necessary */
2540 if (xfs_bmap_needs_btree(ip
, whichfork
)) {
2541 int tmp_logflags
; /* partial log flag return val */
2543 ASSERT(cur
== NULL
);
2544 error
= xfs_bmap_extents_to_btree(tp
, ip
, &cur
, 0,
2545 &tmp_logflags
, whichfork
);
2546 *logflagsp
|= tmp_logflags
;
2551 /* clear out the allocated field, done with it now in any case. */
2553 cur
->bc_bmap
.allocated
= 0;
2557 xfs_bmap_check_leaf_extents(*curp
, ip
, whichfork
);
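/*
 * To make the switch above concrete: converting the first block of a
 * two-block unwritten extent whose left neighbour is written and physically
 * adjacent sets BMAP_LEFT_FILLING (the conversion starts at PREV's start)
 * and BMAP_LEFT_CONTIG (the result can merge left), but not
 * BMAP_RIGHT_FILLING, so the BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG case
 * runs: LEFT grows by one block while PREV shrinks and shifts right by one.
 */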
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	uint32_t		flags)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) &&
	    xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		left.br_blockcount += new->br_blockcount + right.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		ifp->if_nextents--;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &right, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		old = left;
		left.br_blockcount += new->br_blockcount;

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		old = right;

		right.br_startoff = new->br_startoff;
		right.br_startblock = new->br_startblock;
		right.br_blockcount += new->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &right);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &right);
			if (error)
				goto done;
		}
		break;

	default:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	}

	/* add reverse mapping unless caller opted out */
	if (!(flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_bmap.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on i_extsize and rt extsize.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	div_u64_rem(orig_off, extsz, &temp);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so
	 * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer
	 * allocation loops handle short allocation just fine, so it is safe to
	 * do this. We only want to do it when we are forced to, though, because
	 * it means more allocation operations are required.
	 */
	while (align_alen > XFS_MAX_BMBT_EXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see XFS_BMBT_MAX_EXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
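/*
 * A worked example of the alignment logic above: with extsz = 16, a request
 * for offset 100 and length 10 leaves a remainder of 4 at the start, so
 * align_off drops to 96 and align_alen grows to 14; rounding the tail up to
 * the next extsz boundary then gives align_alen = 16, covering file blocks
 * 96-111.  The neighbour checks and the XFS_MAX_BMBT_EXTLEN clamp can shrink
 * the result again, but the asserts above verify that the original request
 * stays covered unless that clamp forced a short length.
 */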
static bool
xfs_bmap_adjacent_valid(
	struct xfs_bmalloca	*ap,
	xfs_fsblock_t		x,
	xfs_fsblock_t		y)
{
	struct xfs_mount	*mp = ap->ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ap->ip) &&
	    (ap->datatype & XFS_ALLOC_USERDATA)) {
		if (!xfs_has_rtgroups(mp))
			return x < mp->m_sb.sb_rblocks;

		return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
			xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
			xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
	}

	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
}

#define XFS_ALLOC_GAP_UNITS	4
/* returns true if ap->blkno was modified */
static bool
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t		adjust;	/* adjustment to block numbers */

	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    xfs_bmap_adjacent_valid(ap,
			ap->prev.br_startblock + ap->prev.br_blockcount,
			ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
				ap->prev.br_startblock))
			ap->blkno += adjust;
		return true;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    xfs_bmap_adjacent_valid(ap, prevbno,
				ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
					ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
					gotbno))
				gotbno -= adjust;
			else if (xfs_bmap_adjacent_valid(ap,
					gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
			return true;
		}
		if (prevbno != NULLFSBLOCK) {
			ap->blkno = prevbno;
			return true;
		}
		if (gotbno != NULLFSBLOCK) {
			ap->blkno = gotbno;
			return true;
		}
	}

	return false;
}
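/*
 * On the XFS_ALLOC_GAP_UNITS heuristic above: when the logical gap between
 * the neighbouring extent and the request is at most four times the
 * allocation length, the target block is offset by the same gap so the
 * physical layout mirrors the file layout; for larger gaps the end of the
 * previous extent (or the start of the next one, less the length) is used
 * directly and the gap only penalises the candidate in the prevdiff vs.
 * gotdiff comparison.
 */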
static int
xfs_bmap_longest_free_extent(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		*blen)
{
	xfs_extlen_t		longest;
	int			error = 0;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
				NULL);
		if (error)
			return error;
	}

	longest = xfs_alloc_longest_free_extent(pag,
			xfs_alloc_min_freelist(pag_mount(pag), pag),
			xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

	return 0;
}
static xfs_extlen_t
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen)
{
	/*
	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is
	 * possible that there is enough contiguous free space for this request.
	 */
	if (blen < ap->minlen)
		return ap->minlen;

	/*
	 * If the best seen length is less than the request length,
	 * use the best as the minimum, otherwise we've got the maxlen we
	 * were asked for.
	 */
	if (blen < args->maxlen)
		return blen;
	return args->maxlen;
}
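/*
 * In effect minlen is clamped to the range [ap->minlen, args->maxlen] using
 * the longest free extent seen so far (blen): for example, with
 * ap->minlen = 1, args->maxlen = 64 and a best-seen free extent of 48
 * blocks, the allocation demands at least 48 blocks so a single AG can
 * satisfy it without shortening the request more than necessary.
 */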
static int
xfs_bmap_btalloc_select_lengths(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno, startag;
	int			error = 0;

	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->total = ap->minlen;
		args->minlen = ap->minlen;
		return 0;
	}

	args->total = ap->total;
	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
	if (startag == NULLAGNUMBER)
		startag = 0;

	*blen = 0;
	for_each_perag_wrap(mp, startag, agno, pag) {
		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
		if (error && error != -EAGAIN)
			break;
		error = 0;
		if (*blen >= args->maxlen)
			break;
	}
	if (pag)
		xfs_perag_rele(pag);

	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
	return error;
}
/* Update all inode and quota accounting for the allocation we just did. */
static void
xfs_bmap_alloc_account(
	struct xfs_bmalloca	*ap)
{
	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
					!(ap->flags & XFS_BMAPI_ATTRFORK);
	uint			fld;

	if (ap->flags & XFS_BMAPI_COWFORK) {
		/*
		 * COW fork blocks are in-core only and thus are treated as
		 * in-core quota reservation (like delalloc blocks) even when
		 * converted to real blocks. The quota reservation is not
		 * accounted to disk until blocks are remapped to the data
		 * fork. So if these blocks were previously delalloc, we
		 * already have quota reservation and there's nothing to do
		 * yet.
		 */
		if (ap->wasdel) {
			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
			return;
		}

		/*
		 * Otherwise, we've allocated blocks in a hole. The transaction
		 * has acquired in-core quota reservation for this extent.
		 * Rather than account these as real blocks, however, we reduce
		 * the transaction quota reservation based on the allocation.
		 * This essentially transfers the transaction quota reservation
		 * to that of a delalloc extent.
		 */
		ap->ip->i_delayed_blks += ap->length;
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
				-(long)ap->length);
		return;
	}

	/* data/attr fork only */
	ap->ip->i_nblocks += ap->length;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
	if (ap->wasdel) {
		ap->ip->i_delayed_blks -= ap->length;
		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
	} else {
		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
	}

	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
}
static int
xfs_bmap_compute_alignments(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	struct xfs_mount	*mp = args->mp;
	xfs_extlen_t		align = 0; /* minimum allocation alignment */
	int			stripe_align = 0;

	/* stripe alignment for allocation is determined by mount parameters */
	if (mp->m_swidth && xfs_has_swalloc(mp))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (ap->datatype & XFS_ALLOC_USERDATA)
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
					ap->eof, 0, ap->conv, &ap->offset,
					&ap->length))
			ASSERT(0);
		ASSERT(ap->length);
	}

	/* apply extent size hints if obtained earlier */
	if (align) {
		args->prod = align;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args->prod = 1;
		args->mod = 0;
	} else {
		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	}

	return stripe_align;
}
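/*
 * Example of the prod/mod computation above: with 4k filesystem blocks and
 * a 64k PAGE_SIZE, args->prod is 16 blocks; a request at file offset 35
 * blocks leaves a remainder of 3, flipped to args->mod = 13 so that the
 * allocator prefers a length that makes the extent end on a page-aligned
 * file offset (35 + 13 = 48, a multiple of 16).
 */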
static void
xfs_bmap_process_allocated_extent(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_fileoff_t		orig_offset,
	xfs_extlen_t		orig_length)
{
	ap->blkno = args->fsbno;
	ap->length = args->len;
	/*
	 * If the extent size hint is active, we tried to round the
	 * caller's allocation request offset down to extsz and the
	 * length up to another extsz boundary.  If we found a free
	 * extent we mapped it in starting at this new offset.  If the
	 * newly mapped space isn't long enough to cover any of the
	 * range of offsets that was originally requested, move the
	 * mapping up so that we can fill as much of the caller's
	 * original request as possible.  Free space is apparently
	 * very fragmented so we're unlikely to be able to satisfy the
	 * hints anyway.
	 */
	if (ap->length <= orig_length)
		ap->offset = orig_offset;
	else if (ap->offset + ap->length < orig_offset + orig_length)
		ap->offset = orig_offset + orig_length - ap->length;
	xfs_bmap_alloc_account(ap);
}
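/*
 * Worked example for the adjustment above: a caller asks for offset 100,
 * length 4, and the extent size hint widens that to offset 96, length 16.
 * If the allocator only finds 2 free blocks, leaving the mapping at the
 * aligned offset 96 would cover blocks 96-97 and miss the requested range
 * entirely, so ap->offset is moved back to the original offset 100 and the
 * caller gets at least part of what it asked for.
 */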
static int
xfs_bmap_exact_minlen_extent_alloc(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	if (ap->minlen != 1) {
		args->fsbno = NULLFSBLOCK;
		return 0;
	}

	args->alloc_minlen_only = 1;
	args->minlen = args->maxlen = ap->minlen;
	args->total = ap->total;

	/*
	 * Unlike the longest extent available in an AG, we don't track
	 * the length of an AG's shortest extent.
	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
	 * hence we can afford to start traversing from the 0th AG since
	 * we need not be concerned about a drop in performance in
	 * "debug only" code paths.
	 */
	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);

	/*
	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
	 * iteration and then drops args->total to args->minlen, which might be
	 * required to find an allocation for the transaction reservation when
	 * the file system is very full.
	 */
	return xfs_bmap_btalloc_low_space(ap, args);
}
/*
 * If we are not low on available data blocks and we are allocating at
 * EOF, optimise allocation for contiguous file extension and/or stripe
 * alignment of the new extent.
 *
 * NOTE: ap->aeof is only set if the allocation length is >= the
 * stripe unit and the allocation offset is at the end of file.
 */
static int
xfs_bmap_btalloc_at_eof(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen,
	int			stripe_align,
	bool			ag_only)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*caller_pag = args->pag;
	int			error;

	/*
	 * If there are already extents in the file, and xfs_bmap_adjacent() has
	 * given a better blkno, try an exact EOF block allocation to extend the
	 * file as a contiguous extent. If that fails, or it's the first
	 * allocation in a file, just try for a stripe aligned allocation.
	 */
	if (ap->eof) {
		xfs_extlen_t	nextminlen = 0;

		/*
		 * Compute the minlen+alignment for the next case.  Set slop so
		 * that the value of minlen+alignment+slop doesn't go up between
		 * the calls.
		 */
		args->alignment = 1;
		if (blen > stripe_align && blen <= args->maxlen)
			nextminlen = blen - stripe_align;
		else
			nextminlen = args->minlen;
		if (nextminlen + stripe_align > args->minlen + 1)
			args->minalignslop = nextminlen + stripe_align -
					args->minlen - 1;
		else
			args->minalignslop = 0;

		if (!caller_pag)
			args->pag = xfs_perag_get(mp,
					XFS_FSB_TO_AGNO(mp, ap->blkno));
		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
		if (!caller_pag) {
			xfs_perag_put(args->pag);
			args->pag = NULL;
		}
		if (error)
			return error;

		if (args->fsbno != NULLFSBLOCK)
			return 0;

		/*
		 * Exact allocation failed. Reset to try an aligned allocation
		 * according to the original allocation specification.
		 */
		args->alignment = stripe_align;
		args->minlen = nextminlen;
		args->minalignslop = 0;
	} else {
		/*
		 * Adjust minlen to try and preserve alignment if we
		 * can't guarantee an aligned maxlen extent.
		 */
		args->alignment = stripe_align;
		if (blen > args->alignment &&
		    blen <= args->maxlen + args->alignment)
			args->minlen = blen - args->alignment;
		args->minalignslop = 0;
	}

	if (ag_only) {
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	} else {
		args->pag = NULL;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		ASSERT(args->pag == NULL);
		args->pag = caller_pag;
	}
	if (error)
		return error;

	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Allocation failed, so reset the allocation args to their
	 * original non-aligned state so the caller can proceed on allocation
	 * failure as if this function was never called.
	 */
	args->alignment = 1;
	return 0;
}
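/*
 * The EOF path above is deliberately two-phase: the exact-bno attempt
 * extends the file contiguously with alignment 1, while minalignslop
 * reserves enough extra space that the stripe-aligned retry (alignment =
 * stripe_align, minlen = nextminlen) never needs more blocks than the
 * first attempt budgeted for, so minlen + alignment + slop does not grow
 * between the two calls.
 */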
/*
 * We have failed multiple allocation attempts so now are in a low space
 * allocation situation. Try a locality first full filesystem minimum length
 * allocation whilst still maintaining necessary total block reservation
 * requirements.
 *
 * If that fails, we are now critically low on space, so perform a last resort
 * allocation attempt: no reserve, no locality, blocking, minimum length, full
 * filesystem free space scan. We also indicate to future allocations in this
 * transaction that we are critically low on space so they don't waste time on
 * allocation modes that are unlikely to succeed.
 */
static int
xfs_bmap_btalloc_low_space(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	int			error;

	if (args->minlen > ap->minlen) {
		args->minlen = ap->minlen;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	/* Last ditch attempt before failure is declared. */
	args->total = ap->minlen;
	error = xfs_alloc_vextent_first_ag(args, 0);
	if (error)
		return error;
	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	return 0;
}
static int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error = 0;

	error = xfs_filestream_select_ag(ap, args, &blen);
	if (error)
		return error;
	ASSERT(args->pag);

	/*
	 * If we are in low space mode, then optimal allocation will fail so
	 * prepare for minimal allocation and jump to the low space algorithm
	 * immediately.
	 */
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->minlen = ap->minlen;
		ASSERT(args->fsbno == NULLFSBLOCK);
		goto out_low_space;
	}

	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
	if (ap->aeof)
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				true);

	if (!error && args->fsbno == NULLFSBLOCK)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);

out_low_space:
	/*
	 * We are now done with the perag reference for the filestreams
	 * association provided by xfs_filestream_select_ag(). Release it now as
	 * we've either succeeded, had a fatal error or we are out of space and
	 * need to do a full filesystem scan for free space which will take its
	 * own references.
	 */
	xfs_perag_rele(args->pag);
	args->pag = NULL;
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
static int
xfs_bmap_btalloc_best_length(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error;

	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
	if (!xfs_bmap_adjacent(ap))
		ap->eof = false;

	/*
	 * Search for an allocation group with a single extent large enough for
	 * the request.  If one isn't found, then adjust the minimum allocation
	 * size to the largest space found.
	 */
	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
	if (error)
		return error;

	/*
	 * Don't attempt optimal EOF allocation if previous allocations barely
	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
	 * optimal or even aligned allocations in this case, so don't waste time
	 * trying.
	 */
	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				false);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
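/*
 * Taken together, the best-length path degrades gracefully: exact EOF
 * placement first (when ap->aeof is set and the transaction is not in low
 * mode), then a full AG scan starting at the adjacent block, and finally
 * xfs_bmap_btalloc_low_space() drops the total and minimum length
 * reservations as a last resort.
 */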
static int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = {
		.tp		= ap->tp,
		.mp		= mp,
		.fsbno		= NULLFSBLOCK,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minleft	= ap->minleft,
		.wasdel		= ap->wasdel,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= ap->datatype,
	};
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;
	int			stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	stripe_align = xfs_bmap_compute_alignments(ap, &args);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);

	if (unlikely(XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
			xfs_inode_is_filestream(ap->ip))
		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
	else
		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
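/*
 * Example: trimming a mapping with br_startoff = 10, br_blockcount = 20
 * (file blocks 10-29) to bno = 15, len = 10 advances br_startoff and
 * br_startblock by 5 and cuts 10 blocks off the tail, leaving blocks
 * 15-24; a mapping entirely outside the range comes back with
 * br_blockcount = 0.
 */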
/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	uint32_t		flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for, for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
}
/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	uint32_t		flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   mval[-1].br_state == mval->br_state) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	if (WARN_ON_ONCE(!ifp)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
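/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * map one file block without allocating, holding at least the shared
 * ilock:
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, 1, &map, &nmap, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * On success nmap is 1 and map describes a real extent, a delalloc
 * reservation (DELAYSTARTBLOCK) or a hole (HOLESTARTBLOCK).
 */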
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			error;

	ASSERT(bma->length > 0);
	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		/*
		 * For the data and COW fork, the first data in the file is
		 * treated differently to all other allocations. For the
		 * attribute fork, we only need to ensure the allocated range
		 * is not on the busy list.
		 */
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
			bma->datatype |= XFS_ALLOC_USERDATA;
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

			if (mp->m_dalign && bma->length >= mp->m_dalign) {
				error = xfs_bmap_isaeof(bma, whichfork);
				if (error)
					return error;
			}
		}
	}

	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
	    XFS_IS_REALTIME_INODE(bma->ip))
		error = xfs_bmap_rtalloc(bma);
	else
		error = xfs_bmap_btalloc(bma);
	if (error)
		return error;
	if (bma->blkno == NULLFSBLOCK)
		return -ENOSPC;

	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
		xfs_bmap_mark_sick(bma->ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur && bma->wasdel)
		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	if (bma->flags & XFS_BMAPI_PREALLOC)
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	uint32_t		flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
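/*
 * Note the -EAGAIN contract above: when the converted mapping was merged
 * with already-written space and no longer covers the full requested
 * length, the caller (xfs_bmapi_write) loops and issues another mapping
 * request for the remainder instead of treating it as a failure.
 */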
static xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);

	if (tp && tp->t_highest_agno != NULLAGNUMBER)
		return 0;
	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
}
/*
 * Log whatever the flags say, even if error.  Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged so we
 * don't shutdown when we should.  Don't bother logging extents/btree changes if
 * we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);

	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * Returns 0 on success and places the extent mappings in mval.  nmaps is used
 * as an input/output parameter where the caller specifies the maximum number
 * of mappings that may be returned and xfs_bmapi_write passes back the number
 * of mappings (including existing mappings) it found.
 *
 * Returns a negative error code on failure, including -ENOSPC when it could not
 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
 * delalloc range, but those blocks were before the passed in range.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	uint32_t		flags,		/* XFS_BMAPI_... */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		goto error0;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			         (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * be careful and do the min() using the larger type to
			 * avoid overflows.
			 */
			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);

			if (wasdelay) {
				bma.length = XFS_FILBLKS_MIN(bma.length,
					bma.got.br_blockcount -
					(bno - bma.got.br_startoff));
			} else {
				if (!eof)
					bma.length = XFS_FILBLKS_MIN(bma.length,
						bma.got.br_startoff - bno);
			}

			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error) {
				/*
				 * If we already allocated space in a previous
				 * iteration return what we got so far when
				 * running out of space.
				 */
				if (error == -ENOSPC && bma.nallocs)
					break;
				goto error0;
			}

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK)
				xfs_refcount_alloc_cow_extent(tp,
						XFS_IS_REALTIME_INODE(ip),
						bma.blkno, bma.length);
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
					end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}
	*nmap = n;

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto error0;

	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
	xfs_bmapi_finish(&bma, whichfork, 0);
	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
		orig_nmap, *nmap);

	/*
	 * When converting delayed allocations, xfs_bmapi_allocate ignores
	 * the passed in bno and always converts from the start of the found
	 * delalloc extent.
	 *
	 * To avoid a successful return with *nmap set to 0, return the magic
	 * -ENOSR error code for this particular case so that the caller can
	 * handle it.
	 */
	if (!*nmap) {
		ASSERT(bma.nallocs >= *nmap);
		return -ENOSR;
	}
	return 0;
error0:
	xfs_bmapi_finish(&bma, whichfork, error);
	return error;
}
/*
 * Convert an existing delalloc extent to real blocks based on file offset. This
 * attempts to allocate the entire delalloc extent and may require multiple
 * invocations to allocate the target offset if a large enough physical extent
 * is not available.
 */
static int
xfs_bmapi_convert_one_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmalloca	bma = { NULL };
	uint16_t		flags = 0;
	struct xfs_trans	*tp;
	int			error;

	if (whichfork == XFS_COW_FORK)
		flags |= IOMAP_F_SHARED;

	/*
	 * Space for the extent and indirect blocks was reserved when the
	 * delalloc extent was created so there's no need to do so here.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
				XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, whichfork,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;

	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
	    bma.got.br_startoff > offset_fsb) {
		/*
		 * No extent found in the range we are trying to convert.  This
		 * should only happen for the COW fork, where another thread
		 * might have moved the extent to the data fork in the meantime.
		 */
		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
		error = -EAGAIN;
		goto out_trans_cancel;
	}

	/*
	 * If we find a real extent here we raced with another thread converting
	 * the extent.  Just return the real extent at this offset.
	 */
	if (!isnullstartblock(bma.got.br_startblock)) {
		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
				xfs_iomap_inode_sequence(ip, flags));
		if (seq)
			*seq = READ_ONCE(ifp->if_seq);
		goto out_trans_cancel;
	}

	bma.tp = tp;
	bma.ip = ip;
	bma.wasdel = true;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	/*
	 * Always allocate convert from the start of the delalloc extent even if
	 * that is outside the passed in range to create large contiguous
	 * extents on disk.
	 */
	bma.offset = bma.got.br_startoff;
	bma.length = bma.got.br_blockcount;

	/*
	 * When we're converting the delalloc reservations backing dirty pages
	 * in the page cache, we must be careful about how we create the new
	 * extents:
	 *
	 * New CoW fork extents are created unwritten, turned into real extents
	 * when we're about to write the data to disk, and mapped into the data
	 * fork after the write finishes.  End of story.
	 *
	 * New data fork extents must be mapped in as unwritten and converted
	 * to real extents after the write succeeds to avoid exposing stale
	 * disk contents if we crash.
	 */
	bma.flags = XFS_BMAPI_PREALLOC;
	if (whichfork == XFS_COW_FORK)
		bma.flags |= XFS_BMAPI_COWFORK;

	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;

	error = xfs_bmapi_allocate(&bma);
	if (error)
		goto out_finish;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
	XFS_STATS_INC(mp, xs_xstrat_quick);

	ASSERT(!isnullstartblock(bma.got.br_startblock));
	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
			xfs_iomap_inode_sequence(ip, flags));
	if (seq)
		*seq = READ_ONCE(ifp->if_seq);

	if (whichfork == XFS_COW_FORK)
		xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
				bma.blkno, bma.length);

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto out_finish;

	xfs_bmapi_finish(&bma, whichfork, 0);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_finish:
	xfs_bmapi_finish(&bma, whichfork, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Pass in a delalloc extent and convert it to real extents; return the real
 * extent that maps offset_fsb in iomap.
 */
int
xfs_bmapi_convert_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into iomap.  Allocate in a loop because it may
	 * take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
				iomap, seq);
		if (error)
			return error;
	} while (iomap->offset + iomap->length <= offset);

	return 0;
}

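/*
 * For example (hypothetical numbers): if free space is fragmented and the
 * delalloc extent backing offset spans FSBs [0, 100) while offset maps to
 * FSB 60, the first pass of the loop above might only obtain a real
 * allocation for [0, 40).  The loop condition
 * (iomap->offset + iomap->length <= offset) then sends us around again
 * until some allocation actually covers FSB 60.
 */
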
static int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			whichfork = xfs_bmapi_whichfork(flags);
	int			logflags = 0, error;

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
			   XFS_BMAPI_NORMAP)));
	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_nblocks += len;
	ip->i_delayed_blks -= len;	/* see xfs_bmap_defer_add */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	if (flags & XFS_BMAPI_PREALLOC)
		got.br_state = XFS_EXT_UNWRITTEN;
	else
		got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
			&cur, &got, &logflags, flags);
	if (error)
		goto error0;

	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);

error0:
	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	return error;
}

/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents.  If necessary, steal
 * available blocks from a deleted extent to make up a reservation deficiency
 * (e.g., if ores == 1).  The number of stolen blocks is returned.  The
 * availability and subsequent accounting of stolen blocks is the
 * responsibility of the caller.
 */
static void
xfs_bmap_split_indlen(
	xfs_filblks_t		ores,		/* original res. */
	xfs_filblks_t		*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t		*indlen2)	/* ext2 worst indlen */
{
	xfs_filblks_t		len1 = *indlen1;
	xfs_filblks_t		len2 = *indlen2;
	xfs_filblks_t		nres = len1 + len2; /* new total res. */
	xfs_filblks_t		resfactor;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that one
	 * of the two extents is left with nothing when extents are repeatedly
	 * split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent. If one of the two reservations
	 * is zero, we want to make sure that one gets a block first. The loop
	 * below starts with len1, so hand len2 a block right off the bat if it
	 * is zero.
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
}

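/*
 * A worked example of the split above: ores = 7, *indlen1 = *indlen2 = 5.
 * Then nres = 10 and resfactor = 70, so the proportional pass leaves
 * len1 = len2 = (5 * 70) / 100 = 3 and ores = 7 - 6 = 1.  The remainder
 * loop hands the spare block to len1, for a final split of 4 and 3 --
 * exactly the original reservation, favouring neither extent by more than
 * one block.
 */
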
void
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del,
	uint32_t		bflags)		/* bmapi flags */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	uint64_t		fdblocks;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = xfs_ifork_is_realtime(ip, whichfork);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
	ip->i_delayed_blks -= del->br_blockcount;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two new
		 * extents.  Steal blocks from the deleted extent if necessary.
		 * Stealing blocks simply fudges the fdblocks accounting below.
		 * Warn if either of the new indlen reservations is zero as this
		 * can lead to delalloc problems.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);
		/*
		 * Steal as many blocks as we can to try and satisfy the worst
		 * case indlen for both new extents.
		 *
		 * However, we can't just steal reservations from the data
		 * blocks if this is an RT inode, as the data and metadata
		 * blocks come from different pools.  We'll have to live with
		 * under-filled indirect reservation in this case.
		 */
		da_new = got_indlen + new_indlen;
		if (da_new > da_old && !isrt) {
			stolen = XFS_FILBLKS_MIN(da_new - da_old,
						 del->br_blockcount);
			da_old += stolen;
		}
		if (da_new > da_old)
			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
		da_new = got_indlen + new_indlen;

		got->br_startblock = nullstartblock((int)got_indlen);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);

		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	fdblocks = da_diff;

	if (bflags & XFS_BMAPI_REMAP) {
		;
	} else if (isrt) {
		xfs_rtbxlen_t	rtxlen;

		rtxlen = xfs_blen_to_rtbxlen(mp, del->br_blockcount);
		if (xfs_is_zoned_inode(ip))
			xfs_zoned_add_available(mp, rtxlen);
		xfs_add_frextents(mp, rtxlen);
	} else {
		fdblocks += del->br_blockcount;
	}

	xfs_add_fdblocks(mp, fdblocks);
	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
}

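/*
 * To illustrate the stealing logic above (example values): punching a hole
 * in the middle of a delalloc extent whose indlen reservation has dwindled
 * to da_old = 1 might compute got_indlen = new_indlen = 1, i.e. da_new = 2.
 * On a data device inode we steal the one missing block from the deleted
 * range (stolen = 1, so del->br_blockcount shrinks by one) instead of
 * squeezing both reservations through xfs_bmap_split_indlen(); on a
 * realtime inode no stealing is possible and the split makes do with da_old.
 */
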
void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	uint32_t		state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}

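/*
 * The four cases of the LEFT_FILLING/RIGHT_FILLING switch used by the two
 * deletion helpers above, sketched against the incore extent "got":
 *
 *	both filling:	[ ........ del ........ ]	got removed outright
 *	left filling:	[ del ][ remainder      ]	got trimmed at the front
 *	right filling:	[      remainder ][ del ]	got trimmed at the back
 *	neither:	[ left ][ del ][ right  ]	got split, "new" inserted
 */
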
static int
xfs_bmap_free_rtblocks(
	struct xfs_trans	*tp,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_rtgroup	*rtg;
	int			error;

	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
	if (!rtg)
		return -EIO;

	/*
	 * Ensure the bitmap and summary inodes are locked and joined to the
	 * transaction before modifying them.
	 */
	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
	}

	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
			del->br_blockcount);
	xfs_rtgroup_rele(rtg);
	return error;
}

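/*
 * Note that XFS_TRANS_RTBITMAP_LOCKED makes the lock/join above a
 * once-per-transaction operation: repeated frees within a single transaction
 * skip straight to xfs_rtfree_blocks() instead of locking the rtgroup
 * bitmap again.
 */
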
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	uint32_t		bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock = 0; /* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			error = 0;	/* error return value */
	struct xfs_bmbt_irec	got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;

	/*
	 * If it's the case where the directory code is running with no block
	 * reservation, and the deleted block is in the middle of its extent,
	 * and the resulting insert of an extent would cause transformation to
	 * btree format, then reject it.  The calling code will then swap blocks
	 * around instead.  We have to do this now, rather than waiting for the
	 * conversion to btree format, since the transaction will be dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	*logflagsp = XFS_ILOG_CORE;
	if (xfs_ifork_is_realtime(ip, whichfork))
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	else
		qfield = XFS_TRANS_DQ_BCOUNT;
	nblks = del->br_blockcount;

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		*logflagsp |= XFS_ILOG_CORE;
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		*logflagsp |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				return error;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				return error;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				return error;
			/*
			 * If get no-space back from btree insert, it tried a
			 * split, and we have a zero block reservation.  Fix up
			 * our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					return error;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					xfs_btree_mark_sick(cur);
					return -EFSCORRUPTED;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					return error;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				*logflagsp = 0;
				return -ENOSPC;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				return -EFSCORRUPTED;
			}
		} else
			*logflagsp |= xfs_ilog_fext(whichfork);

		ifp->if_nextents++;
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (!(bflags & XFS_BMAPI_REMAP)) {
		bool	isrt = xfs_ifork_is_realtime(ip, whichfork);

		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			xfs_refcount_decrease_extent(tp, isrt, del);
		} else if (isrt && !xfs_has_rtgroups(mp)) {
			error = xfs_bmap_free_rtblocks(tp, del);
		} else {
			unsigned int	efi_flags = 0;

			if ((bflags & XFS_BMAPI_NODISCARD) ||
			    del->br_state == XFS_EXT_UNWRITTEN)
				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;

			/*
			 * Historically, we did not use EFIs to free realtime
			 * extents.  However, when reverse mapping is enabled,
			 * we must maintain the same order of operations as the
			 * data device, which is: Remove the file mapping,
			 * remove the reverse mapping, and then free the
			 * blocks.  Reflink for realtime volumes requires the
			 * same sort of ordering.  Both features rely on
			 * rtgroups, so let's gate rt EFI usage on rtgroups.
			 */
			if (isrt)
				efi_flags |= XFS_FREE_EXTENT_REALTIME;

			error = xfs_free_extent_later(tp, del->br_startblock,
					del->br_blockcount, NULL,
					XFS_AG_RESV_NONE, efi_flags);
		}
		if (error)
			return error;
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	return 0;
}

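/*
 * The t_blk_res == 0 rejection near the top of xfs_bmap_del_extent_real()
 * matters for the middle-deletion case: deleting the middle of an extent
 * inserts a record, and if the fork already holds XFS_IFORK_MAXEXT()
 * records that insert would force an extents-to-btree conversion needing
 * block allocation we have no reservation for.  Returning -ENOSPC before
 * dirtying the transaction lets the directory code fall back to swapping
 * blocks around instead.
 */
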
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *done is set.
 */
static int
__xfs_bunmapi(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		start,		/* first file offset deleted */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	uint32_t		flags,		/* misc flags */
	xfs_extnum_t		nexts)		/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}
	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = xfs_ifork_is_realtime(ip, whichfork);
	end = start + len;

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	} else
		cur = NULL;

	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		if (!isrt || (flags & XFS_BMAPI_REMAP))
			goto delete;

		mod = xfs_rtb_to_rtxoff(mp,
				del.br_startblock + del.br_blockcount);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}

		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
					&del, flags);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
			if (error)
				goto error0;
		}

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

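/*
 * __xfs_bunmapi() walks the range back to front: each pass trims or deletes
 * the extent covering "end" and then pulls end back to del.br_startoff - 1,
 * so a partial unmap always leaves the front of the range mapped and *rlen
 * reports what remains.  On the realtime device, pieces that do not cover
 * whole rt extents are converted to unwritten rather than freed, since
 * partial rt extents cannot be returned to the rt allocator.
 */
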
/* Unmap a range of a file. */
int
xfs_bunmapi(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}

/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
static bool
xfs_bmse_can_merge(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
		return false;

	return true;
}

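/*
 * For instance, left = [off 10, +5 blocks @ fsb 100] and got = [off 17,
 * +3 blocks @ fsb 105] are mergeable under a shift of 2: the shifted start
 * (15) lines up with left's end in the file, and fsb 105 continues left's
 * blocks on disk.  Any mismatch in state, contiguity, combined length or
 * rtgroup fails the checks above.
 */
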
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
static int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
				offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

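/*
 * The BMBT_STARTOFF_MASK check above detects overflow of the on-disk
 * startoff bitfield: if adding the shift wraps within the masked width,
 * the masked result compares less than the original startoff and the
 * right-shift is refused with -EINVAL.
 */
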
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We should
		 * never find mergeable extents in this scenario.  Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(ip, whichfork, &got, &next,
				offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/*
 * Splits an extent into two extents at split_fsb block such that split_fsb
 * becomes the first block of the new second extent.  @split_fsb is the block
 * where the extent is split.  If split_fsb lies in a hole or is the first
 * block of an extent, just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

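/*
 * For example, splitting got = [off 0, +10 blocks @ fsb 50] at split_fsb 4
 * leaves got = [0, +4 @ 50] and inserts new = [4, +6 @ 54]: same state,
 * same disk blocks, one extra record (and possibly a btree conversion if
 * the fork was at its inline extent limit).
 */
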
/* Record a bmap intent. */
static void
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
	    bmap->br_startblock == HOLESTARTBLOCK ||
	    bmap->br_startblock == DELAYSTARTBLOCK)
		return;

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_defer_add(tp, bi);
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
}

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	struct xfs_bmbt_irec	*bmap = &bi->bi_bmap;
	int			error = 0;
	unsigned int		flags = 0;

	if (bi->bi_whichfork == XFS_ATTR_FORK)
		flags |= XFS_BMAPI_ATTRFORK;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(bi);

	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
			flags |= XFS_BMAPI_PREALLOC;
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock,
				flags);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
				1);
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent_raw(
	struct xfs_mount	*mp,
	bool			rtfile,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (rtfile && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent_raw(ip->i_mount,
			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
}

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

/*
 * Unmap every extent in part of an inode's fork.  We don't do any higher level
 * invalidation work at all.
 */
int
xfs_bunmapi_range(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	uint32_t		flags,
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		endoff)
{
	xfs_filblks_t		unmap_len = endoff - startoff + 1;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	while (unmap_len > 0) {
		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
				XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(tpp);
		if (error)
			goto out;
	}
out:
	return error;
}

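/*
 * Each trip through the loop above unmaps at most XFS_ITRUNC_MAX_EXTENTS
 * (i.e. 2) extents and then finishes the deferred frees, which may roll
 * the transaction -- hence the struct xfs_trans **tpp: on return, *tpp can
 * point at a different transaction than the one passed in.
 */
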
struct xfs_bmap_query_range {
	xfs_bmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
static int
xfs_bmap_query_range_helper(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_rec *rec,
	void			*priv)
{
	struct xfs_bmap_query_range *query = priv;
	struct xfs_bmbt_irec	irec;
	xfs_failaddr_t		fa;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
			&irec);
	if (fa) {
		xfs_btree_mark_sick(cur);
		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
				cur->bc_ino.whichfork, fa, &irec);
	}

	return query->fn(cur, &irec, query->priv);
}

/* Find all bmaps. */
int
xfs_bmap_query_all(
	struct xfs_btree_cur	*cur,
	xfs_bmap_query_range_fn	fn,
	void			*priv)
{
	struct xfs_bmap_query_range	query = {
		.priv			= priv,
		.fn			= fn,
	};

	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}

/* Helper function to extract extent size hint from inode */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (!xfs_is_always_cow_inode(ip) &&
	    (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip) &&
	    ip->i_mount->m_sb.sb_rextsize > 1)
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	if (XFS_IS_REALTIME_INODE(ip)) {
		b = 0;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			b = ip->i_extsize;
	} else {
		b = xfs_get_extsz_hint(ip);
	}

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
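
/*
 * For example, an inode with a CoW extent size hint of 32 blocks but a
 * regular extent size hint of 64 gets CoW allocations aligned to 64; with
 * neither hint set, the function falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */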