// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "libxfs_priv.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap_btree.h"
#include "xfs_errortag.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota_defs.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_health.h"
#include "defer_item.h"
#include "xfs_symlink_remote.h"
struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */
/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
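
/*
 * Worked example of the computation above (illustrative numbers only,
 * not taken from this file): with minleafrecs ~250 and minnoderecs
 * ~125, a fork capped at 2^31 extents needs ~8.6M leaf blocks.
 * Dividing by minnoderecs per pass gives ~68.7k, ~550, then ~5 blocks;
 * once the count fits in maxrootrecs the loop collapses it to 1, so
 * m_bm_maxlevels would come out as 5 for those numbers.
 */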
unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
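
/*
 * Illustrative walk-through (hypothetical numbers): with m_bmap_dmxr[0]
 * = 256 and m_bmap_dmxr[1] = 128, a delayed extent of len = 1000 needs
 * ceil(1000/256) = 4 leaf blocks, then ceil(4/128) = 1 node block, at
 * which point the early return charges one more block for each level
 * remaining up to the root.  The result is a deliberate overestimate:
 * this is a reservation, so overshooting merely holds a few extra free
 * blocks, while undershooting could leave a transaction without the
 * space it needs mid-update.
 */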
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}
/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}
static int
xfs_bmap_read_buf(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp)		/* buffer for fsbno */
{
	struct xfs_buf		*bp;		/* return value */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
			&xfs_bmbt_buf_ops);
	if (!error) {
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		*bpp = bp;
	}
	return error;
}
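
/*
 * Usage sketch (hedged; helper as declared above): read one bmbt block
 * and release it when done.
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_bmap_read_buf(mp, tp, fsbno, &bp);
 *	if (error)
 *		return error;
 *	... examine XFS_BUF_TO_BLOCK(bp) ...
 *	xfs_trans_brelse(tp, bp);
 *
 * The block number is range-checked before any I/O is issued, so a
 * corrupt child pointer fails fast with -EFSCORRUPTED instead of
 * reading garbage from disk.
 */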
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, convert */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
#endif
	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE, false);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	**curp,	/* cursor returned to caller */
	int			wasdel,	/* converting a delayed alloc */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	if (wasdel)
		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_bmap.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
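
/*
 * Note on the unwind above: the quota and block counts are rolled back
 * in the opposite order to which they were taken, and the fork format
 * is restored to extents before the cursor is torn down, so the inode
 * is never left claiming a btree root it does not actually own.
 */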
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_data = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp, void *priv),
	void		*priv)
{
	int		error;		/* error return value */
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp, priv);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_data = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
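
/*
 * A minimal sketch of an init_fn callout (hypothetical helper, not part
 * of this file; the real symlink callout, xfs_symlink_local_to_remote(),
 * also writes a v5 header and sets the buffer log item type):
 *
 *	static void
 *	example_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
 *			struct xfs_inode *ip, struct xfs_ifork *ifp,
 *			void *priv)
 *	{
 *		memcpy(bp->b_addr, ifp->if_data, ifp->if_bytes);
 *		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *	}
 */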
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	xfs_mount_t		*mp;	/* file system mount struct */
	int			stat;	/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* bmap btree cursor */
	int			error;	/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_da_args	dargs;	/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		dargs.owner = ip->i_ino;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
				XFS_DATA_FORK, xfs_symlink_local_to_remote,
				NULL);

	/* should only be called for types that support local format data */
	ASSERT(0);
	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
	return -EFSCORRUPTED;
}
/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
/*
 * Convert inode from non-attributed to attributed.  Caller must hold the
 * ILOCK_EXCL and the file cannot have an attr fork.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	ASSERT(!xfs_inode_has_attr_fork(ip));

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		return error;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		return error;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	return 0;
}
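
/*
 * Caller's-eye view (a sketch of intent, not a verbatim caller):
 * reserve a transaction sized with XFS_ADDAFORK_SPACE_RES(mp), take
 * ILOCK_EXCL and join the inode, then:
 *
 *	error = xfs_bmap_add_attrfork(tp, ip, size, rsvd);
 *
 * and commit.  On success the attr fork exists in extents format and
 * the superblock feature bits have been updated if this was the first
 * attr use on the filesystem.
 */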
/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};
static int
xfs_bmap_complain_bad_rec(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_failaddr_t			fa,
	const struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount		*mp = ip->i_mount;
	const char			*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			xfs_bmap_mark_sick(ip, whichfork);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}
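
/*
 * ir->loaded advances once per record copied, so after the whole btree
 * walk it must equal if_nextents exactly; both under- and over-counts
 * indicate on-disk corruption and are caught here and by the caller
 * below.
 */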
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	if (xfs_metadata_is_sick(error))
		xfs_bmap_mark_sick(ip, whichfork);
	xfs_iext_destroy(ifp);
	return error;
}
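
/*
 * The release store of if_needextents pairs with the acquire load in
 * xfs_need_iread_extents(): a reader that observes zero is guaranteed
 * to also observe every xfs_iext_insert() made above, which is what
 * makes the lockless "do I need to read extents?" check safe.
 */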
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
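
/*
 * Example (hypothetical fork layout): with mapped extents at file
 * offsets [0, 10) and [15, 20) and *first_unused starting at 0, a call
 * with len = 4 returns 10 because the hole [10, 15) is big enough,
 * while len = 8 skips that hole and returns 20, the first block past
 * the last extent.
 */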
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}
static int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
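
/*
 * In other words: bma->aeof is true either when the new offset starts
 * at or beyond the last extent's end (a pure append), or when it lands
 * inside a trailing delalloc extent - both cases where stripe-aligning
 * the new real allocation is worthwhile.
 */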
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		ASSERT(da_new <= da_old);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		ASSERT(da_new <= da_old);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_bmap.allocated;
		bma->cur->bc_bmap.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new < da_old)
		xfs_add_fdblocks(mp, da_old - da_new);
	else if (da_new > da_old)
		error = xfs_dec_fdblocks(mp, da_new - da_old, true);

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
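
/*
 * Summary of the switch above: the four state bits LEFT/RIGHT_FILLING
 * and LEFT/RIGHT_CONTIG encode sixteen combinations.  Seven of them
 * cannot occur (a neighbour can only be contiguous on a side that is
 * being filled), eight are handled individually, and case 0 is the
 * middle split that turns one delalloc record into three.
 */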
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
2358 xfs_iext_update_extent(ip
, state
, icur
, &RIGHT
);
2361 rval
= XFS_ILOG_DEXT
;
2364 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2367 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2368 xfs_btree_mark_sick(cur
);
2369 error
= -EFSCORRUPTED
;
2372 error
= xfs_bmbt_update(cur
, &PREV
);
2375 error
= xfs_btree_increment(cur
, 0, &i
);
2378 error
= xfs_bmbt_update(cur
, &RIGHT
);
2384 case BMAP_RIGHT_FILLING
:
2386 * Setting the last part of a previous oldext extent to newext.
2387 * The right neighbor is not contiguous.
2390 PREV
.br_blockcount
-= new->br_blockcount
;
2392 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2393 xfs_iext_next(ifp
, icur
);
2394 xfs_iext_insert(ip
, icur
, new, state
);
2398 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2400 rval
= XFS_ILOG_CORE
;
2401 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2404 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2405 xfs_btree_mark_sick(cur
);
2406 error
= -EFSCORRUPTED
;
2409 error
= xfs_bmbt_update(cur
, &PREV
);
2412 error
= xfs_bmbt_lookup_eq(cur
, new, &i
);
2415 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
2416 xfs_btree_mark_sick(cur
);
2417 error
= -EFSCORRUPTED
;
2420 if ((error
= xfs_btree_insert(cur
, &i
)))
2422 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2423 xfs_btree_mark_sick(cur
);
2424 error
= -EFSCORRUPTED
;
2432 * Setting the middle part of a previous oldext extent to
2433 * newext. Contiguity is impossible here.
2434 * One extent becomes three extents.
2437 PREV
.br_blockcount
= new->br_startoff
- PREV
.br_startoff
;
2440 r
[1].br_startoff
= new_endoff
;
2441 r
[1].br_blockcount
=
2442 old
.br_startoff
+ old
.br_blockcount
- new_endoff
;
2443 r
[1].br_startblock
= new->br_startblock
+ new->br_blockcount
;
2444 r
[1].br_state
= PREV
.br_state
;
2446 xfs_iext_update_extent(ip
, state
, icur
, &PREV
);
2447 xfs_iext_next(ifp
, icur
);
2448 xfs_iext_insert(ip
, icur
, &r
[1], state
);
2449 xfs_iext_insert(ip
, icur
, &r
[0], state
);
2450 ifp
->if_nextents
+= 2;
2453 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
2455 rval
= XFS_ILOG_CORE
;
2456 error
= xfs_bmbt_lookup_eq(cur
, &old
, &i
);
2459 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2460 xfs_btree_mark_sick(cur
);
2461 error
= -EFSCORRUPTED
;
2464 /* new right extent - oldext */
2465 error
= xfs_bmbt_update(cur
, &r
[1]);
2468 /* new left extent - oldext */
2469 cur
->bc_rec
.b
= PREV
;
2470 if ((error
= xfs_btree_insert(cur
, &i
)))
2472 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2473 xfs_btree_mark_sick(cur
);
2474 error
= -EFSCORRUPTED
;
2478 * Reset the cursor to the position of the new extent
2479 * we are about to insert as we can't trust it after
2480 * the previous insert.
2482 error
= xfs_bmbt_lookup_eq(cur
, new, &i
);
2485 if (XFS_IS_CORRUPT(mp
, i
!= 0)) {
2486 xfs_btree_mark_sick(cur
);
2487 error
= -EFSCORRUPTED
;
2490 /* new middle extent - newext */
2491 if ((error
= xfs_btree_insert(cur
, &i
)))
2493 if (XFS_IS_CORRUPT(mp
, i
!= 1)) {
2494 xfs_btree_mark_sick(cur
);
2495 error
= -EFSCORRUPTED
;
2501 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2502 case BMAP_RIGHT_FILLING
| BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2503 case BMAP_LEFT_FILLING
| BMAP_RIGHT_CONTIG
:
2504 case BMAP_RIGHT_FILLING
| BMAP_LEFT_CONTIG
:
2505 case BMAP_LEFT_CONTIG
| BMAP_RIGHT_CONTIG
:
2506 case BMAP_LEFT_CONTIG
:
2507 case BMAP_RIGHT_CONTIG
:
2509 * These cases are all impossible.
2514 /* update reverse mappings */
2515 xfs_rmap_convert_extent(mp
, tp
, ip
, whichfork
, new);
2517 /* convert to a btree if necessary */
2518 if (xfs_bmap_needs_btree(ip
, whichfork
)) {
2519 int tmp_logflags
; /* partial log flag return val */
2521 ASSERT(cur
== NULL
);
2522 error
= xfs_bmap_extents_to_btree(tp
, ip
, &cur
, 0,
2523 &tmp_logflags
, whichfork
);
2524 *logflagsp
|= tmp_logflags
;
2529 /* clear out the allocated field, done with it now in any case. */
2531 cur
->bc_bmap
.allocated
= 0;
2535 xfs_bmap_check_leaf_extents(*curp
, ip
, whichfork
);
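/*
 * Illustrative example for the switch above (hypothetical extents, not
 * taken from the surrounding code): how the FILLING and CONTIG bits
 * combine when a conversion consumes all of PREV and merges with both
 * neighbours.
 */
#if 0
	/*
	 * LEFT:  [92, 8]  newext     PREV: [100, 8] oldext
	 * RIGHT: [108, 8] newext     new:  [100, 8] newext
	 *
	 * new covers all of PREV, so both FILLING bits are set; both
	 * neighbours abut new and share its state, so both CONTIG bits
	 * are set as well:
	 */
	state = BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
		BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG;
	/*
	 * That case collapses LEFT, PREV and RIGHT into one 24-block
	 * record at offset 92 and removes two incore extent records.
	 */
#endif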
/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		temp;	/* temp for indirect calculations */

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_startblock = nullstartblock(newlen);
		left.br_blockcount = temp;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		temp = left.br_blockcount + new->br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_blockcount = temp;
		left.br_startblock = nullstartblock(newlen);

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		right.br_startoff = new->br_startoff;
		right.br_startblock = nullstartblock(newlen);
		right.br_blockcount = temp;
		xfs_iext_update_extent(ip, state, icur, &right);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, icur, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);

		/*
		 * Nothing to do for disk quota accounting here.
		 */
		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
	}
}
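/*
 * Illustrative example of the accounting above (hypothetical numbers):
 * merging delalloc reservations usually shrinks the combined worst-case
 * indirect block reservation, and the surplus is returned to fdblocks.
 */
#if 0
	/* left, new and right reserved 4 + 3 + 4 indirect blocks */
	oldlen = 4 + 3 + 4;			/* 11 */
	/* assume the merged extent only needs 7 in the worst case */
	newlen = 7;
	if (oldlen != newlen)
		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);	/* +4 */
#endif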
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	uint32_t		flags)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		left.br_blockcount += new->br_blockcount + right.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		ifp->if_nextents--;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &right, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		old = left;
		left.br_blockcount += new->br_blockcount;

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		old = right;

		right.br_startoff = new->br_startoff;
		right.br_startblock = new->br_startblock;
		right.br_blockcount += new->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &right);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &right);
			if (error)
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	}

	/* add reverse mapping unless caller opted out */
	if (!(flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_bmap.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on i_extsize and rt extsize.
 */
static int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	div_u64_rem(orig_off, extsz, &temp);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
	 * allocation loops handle short allocation just fine, so it is safe to
	 * do this. We only want to do it when we are forced to, though, because
	 * it means more allocation operations are required.
	 */
	while (align_alen > XFS_MAX_BMBT_EXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see XFS_MAX_BMBT_EXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
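/*
 * Illustrative example of the round-down/round-up arithmetic above
 * (hypothetical request, not from the surrounding code):
 */
#if 0
	/* request: offset 37, length 10, extsz 16 */
	div_u64_rem(37, 16, &temp);	/* temp = 5 */
	align_off = 37 - 5;		/* 32: start rounded down */
	align_alen = 10 + 5;		/* 15: still covers offset 37 */
	temp = align_alen % 16;		/* 15 */
	align_alen += 16 - temp;	/* 16: end rounded up to 48 */
	/* the aligned extent [32, 48) covers the request [37, 47) */
#endif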
#define XFS_ALLOC_GAP_UNITS	4

/* returns true if ap->blkno was modified */
bool
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_mount_t	*mp;		/* mount point structure */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)

	mp = ap->ip->i_mount;
	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
		(ap->datatype & XFS_ALLOC_USERDATA);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
		    ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
		return true;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    ISVALID(prevbno, ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(prevbno + prevdiff,
				    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
			return true;
		}
		if (prevbno != NULLFSBLOCK) {
			ap->blkno = prevbno;
			return true;
		}
		if (gotbno != NULLFSBLOCK) {
			ap->blkno = gotbno;
			return true;
		}
	}
#undef ISVALID
	return false;
}
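/*
 * Illustrative example of the XFS_ALLOC_GAP_UNITS heuristic above
 * (hypothetical numbers): for a 100-block allocation, a neighbouring
 * extent only supplies the locality target when the file-offset gap to
 * it is at most 4 * 100 blocks.
 */
#if 0
	ap->length = 100;
	prevdiff = 300;		/* <= 400: aim just past the left extent */
	prevdiff = 500;		/* > 400: keep the neighbour's end block,
				 * but bias the left/right choice by the
				 * full gap size */
#endif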
int
xfs_bmap_longest_free_extent(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		*blen)
{
	xfs_extlen_t		longest;
	int			error = 0;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
				NULL);
		if (error)
			return error;
	}

	longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(pag->pag_mount, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

	return 0;
}
static xfs_extlen_t
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen)
{
	/*
	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is
	 * possible that there is enough contiguous free space for this request.
	 */
	if (blen < ap->minlen)
		return ap->minlen;

	/*
	 * If the best seen length is less than the request length,
	 * use the best as the minimum, otherwise we've got the maxlen we
	 * were asked for.
	 */
	if (blen < args->maxlen)
		return blen;
	return args->maxlen;
}
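/*
 * Illustrative example of the minlen selection above (hypothetical
 * lengths, assuming ap->minlen is small):
 */
#if 0
	args->maxlen = 64;
	/* longest free extent seen is 48 < 64: accept a short allocation */
	args->minlen = xfs_bmap_select_minlen(ap, args, 48);	/* 48 */
	/* longest free extent seen is 128 >= 64: demand the full length */
	args->minlen = xfs_bmap_select_minlen(ap, args, 128);	/* 64 */
#endif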
static int
xfs_bmap_btalloc_select_lengths(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno, startag;
	int			error = 0;

	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->total = ap->minlen;
		args->minlen = ap->minlen;
		return 0;
	}

	args->total = ap->total;
	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
	if (startag == NULLAGNUMBER)
		startag = 0;

	*blen = 0;
	for_each_perag_wrap(mp, startag, agno, pag) {
		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
		if (error && error != -EAGAIN)
			break;
		error = 0;
		if (*blen >= args->maxlen)
			break;
	}
	if (pag)
		xfs_perag_rele(pag);

	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
	return error;
}
/* Update all inode and quota accounting for the allocation we just did. */
void
xfs_bmap_alloc_account(
	struct xfs_bmalloca	*ap)
{
	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
					!(ap->flags & XFS_BMAPI_ATTRFORK);
	uint			fld;

	if (ap->flags & XFS_BMAPI_COWFORK) {
		/*
		 * COW fork blocks are in-core only and thus are treated as
		 * in-core quota reservation (like delalloc blocks) even when
		 * converted to real blocks. The quota reservation is not
		 * accounted to disk until blocks are remapped to the data
		 * fork. So if these blocks were previously delalloc, we
		 * already have quota reservation and there's nothing to do
		 * yet.
		 */
		if (ap->wasdel) {
			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
			return;
		}

		/*
		 * Otherwise, we've allocated blocks in a hole. The transaction
		 * has acquired in-core quota reservation for this extent.
		 * Rather than account these as real blocks, however, we reduce
		 * the transaction quota reservation based on the allocation.
		 * This essentially transfers the transaction quota reservation
		 * to that of a delalloc extent.
		 */
		ap->ip->i_delayed_blks += ap->length;
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
				-(long)ap->length);
		return;
	}

	/* data/attr fork only */
	ap->ip->i_nblocks += ap->length;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
	if (ap->wasdel) {
		ap->ip->i_delayed_blks -= ap->length;
		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
	} else {
		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
	}

	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
}
static int
xfs_bmap_compute_alignments(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	struct xfs_mount	*mp = args->mp;
	xfs_extlen_t		align = 0; /* minimum allocation alignment */
	int			stripe_align = 0;

	/* stripe alignment for allocation is determined by mount parameters */
	if (mp->m_swidth && xfs_has_swalloc(mp))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (ap->datatype & XFS_ALLOC_USERDATA)
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
					ap->eof, 0, ap->conv, &ap->offset,
					&ap->length))
			ASSERT(0);
		ASSERT(ap->length);
	}

	/* apply extent size hints if obtained earlier */
	if (align) {
		args->prod = align;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args->prod = 1;
		args->mod = 0;
	} else {
		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	}

	return stripe_align;
}
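/*
 * Illustrative example of the args->prod/args->mod pair computed above
 * (hypothetical values): the allocator is asked for a length that makes
 * offset + length land on a prod-aligned boundary.
 */
#if 0
	args->prod = 4;			/* 4-block extent size hint */
	div_u64_rem(10 /* ap->offset */, args->prod, &args->mod);  /* 2 */
	if (args->mod)
		args->mod = args->prod - args->mod;		   /* 2 */
	/* a length of 2, 6, 10, ... blocks ends on a 4-block boundary */
#endif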
static void
xfs_bmap_process_allocated_extent(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_fileoff_t		orig_offset,
	xfs_extlen_t		orig_length)
{
	ap->blkno = args->fsbno;
	ap->length = args->len;
	/*
	 * If the extent size hint is active, we tried to round the
	 * caller's allocation request offset down to extsz and the
	 * length up to another extsz boundary.  If we found a free
	 * extent we mapped it in starting at this new offset.  If the
	 * newly mapped space isn't long enough to cover any of the
	 * range of offsets that was originally requested, move the
	 * mapping up so that we can fill as much of the caller's
	 * original request as possible.  Free space is apparently
	 * very fragmented so we're unlikely to be able to satisfy the
	 * hints anyway.
	 */
	if (ap->length <= orig_length)
		ap->offset = orig_offset;
	else if (ap->offset + ap->length < orig_offset + orig_length)
		ap->offset = orig_offset + orig_length - ap->length;
	xfs_bmap_alloc_account(ap);
}
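/*
 * Illustrative example of the mapping move-up above (hypothetical
 * numbers):
 */
#if 0
	/* caller asked for [20, 30); the hint widened it to [16, 48) */
	orig_offset = 20;
	orig_length = 10;
	/* the allocator found only 8 blocks, mapped at [16, 24) */
	ap->offset = 16;
	ap->length = 8;
	if (ap->length <= orig_length)		/* 8 <= 10 */
		ap->offset = orig_offset;	/* remap as [20, 28) */
#endif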
#ifdef DEBUG
static int
xfs_bmap_exact_minlen_extent_alloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;

	ASSERT(ap->length);

	if (ap->minlen != 1) {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
		return 0;
	}

	orig_offset = ap->offset;
	orig_length = ap->length;

	args.alloc_minlen_only = 1;

	xfs_bmap_compute_alignments(ap, &args);

	/*
	 * Unlike the longest extent available in an AG, we don't track
	 * the length of an AG's shortest extent.
	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
	 * hence we can afford to start traversing from the 0th AG since
	 * we need not be concerned about a drop in performance in
	 * "debug only" code paths.
	 */
	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);

	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	args.minlen = args.maxlen = ap->minlen;
	args.total = ap->total;

	args.alignment = 1;
	args.minalignslop = 0;

	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;

	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}

	return 0;
}
#else

#define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)

#endif
/*
 * If we are not low on available data blocks and we are allocating at
 * EOF, optimise allocation for contiguous file extension and/or stripe
 * alignment of the new extent.
 *
 * NOTE: ap->aeof is only set if the allocation length is >= the
 * stripe unit and the allocation offset is at the end of file.
 */
static int
xfs_bmap_btalloc_at_eof(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen,
	int			stripe_align,
	bool			ag_only)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*caller_pag = args->pag;
	int			error;

	/*
	 * If there are already extents in the file, try an exact EOF block
	 * allocation to extend the file as a contiguous extent. If that fails,
	 * or it's the first allocation in a file, just try for a stripe aligned
	 * allocation.
	 */
	if (ap->offset) {
		xfs_extlen_t	nextminlen = 0;

		/*
		 * Compute the minlen+alignment for the next case.  Set slop so
		 * that the value of minlen+alignment+slop doesn't go up between
		 * the calls.
		 */
		args->alignment = 1;
		if (blen > stripe_align && blen <= args->maxlen)
			nextminlen = blen - stripe_align;
		else
			nextminlen = args->minlen;
		if (nextminlen + stripe_align > args->minlen + 1)
			args->minalignslop = nextminlen + stripe_align -
					args->minlen - 1;
		else
			args->minalignslop = 0;

		if (!caller_pag)
			args->pag = xfs_perag_get(mp,
					XFS_FSB_TO_AGNO(mp, ap->blkno));
		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
		if (!caller_pag) {
			xfs_perag_put(args->pag);
			args->pag = NULL;
		}
		if (error)
			return error;

		if (args->fsbno != NULLFSBLOCK)
			return 0;
		/*
		 * Exact allocation failed. Reset to try an aligned allocation
		 * according to the original allocation specification.
		 */
		args->alignment = stripe_align;
		args->minlen = nextminlen;
		args->minalignslop = 0;
	} else {
		/*
		 * Adjust minlen to try and preserve alignment if we
		 * can't guarantee an aligned maxlen extent.
		 */
		args->alignment = stripe_align;
		if (blen > args->alignment &&
		    blen <= args->maxlen + args->alignment)
			args->minlen = blen - args->alignment;
		args->minalignslop = 0;
	}

	if (ag_only) {
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	} else {
		args->pag = NULL;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		ASSERT(args->pag == NULL);
		args->pag = caller_pag;
	}
	if (error)
		return error;

	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Allocation failed, so reset the allocation args to their original
	 * non-aligned state so the caller can proceed on allocation failure
	 * as if this function was never called.
	 */
	args->alignment = 1;
	return 0;
}
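/*
 * Illustrative example of the minalignslop computation above
 * (hypothetical values, blen == 0):
 */
#if 0
	stripe_align = 8;
	args->minlen = 16;
	nextminlen = args->minlen;			/* 16 */
	if (nextminlen + stripe_align > args->minlen + 1)
		args->minalignslop = nextminlen + stripe_align -
				args->minlen - 1;	/* 7 */
	/*
	 * The exact-bno attempt keeps 7 blocks of headroom so the later
	 * aligned retry (minlen 16, alignment 8) can never need more
	 * space than this attempt already accounted for.
	 */
#endif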
/*
 * We have failed multiple allocation attempts so now are in a low space
 * allocation situation. Try a locality first full filesystem minimum length
 * allocation whilst still maintaining necessary total block reservation
 * requirements.
 *
 * If that fails, we are now critically low on space, so perform a last resort
 * allocation attempt: no reserve, no locality, blocking, minimum length, full
 * filesystem free space scan. We also indicate to future allocations in this
 * transaction that we are critically low on space so they don't waste time on
 * allocation modes that are unlikely to succeed.
 */
static int
xfs_bmap_btalloc_low_space(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	int			error;

	if (args->minlen > ap->minlen) {
		args->minlen = ap->minlen;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	/* Last ditch attempt before failure is declared. */
	args->total = ap->minlen;
	error = xfs_alloc_vextent_first_ag(args, 0);
	if (error)
		return error;
	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	return 0;
}
static int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error = 0;

	error = xfs_filestream_select_ag(ap, args, &blen);
	if (error)
		return error;
	ASSERT(args->pag);

	/*
	 * If we are in low space mode, then optimal allocation will fail so
	 * prepare for minimal allocation and jump to the low space algorithm
	 * immediately.
	 */
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->minlen = ap->minlen;
		ASSERT(args->fsbno == NULLFSBLOCK);
		goto out_low_space;
	}

	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
	if (ap->aeof)
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				true);

	if (!error && args->fsbno == NULLFSBLOCK)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);

out_low_space:
	/*
	 * We are now done with the perag reference for the filestreams
	 * association provided by xfs_filestream_select_ag(). Release it now as
	 * we've either succeeded, had a fatal error or we are out of space and
	 * need to do a full filesystem scan for free space which will take its
	 * own references.
	 */
	xfs_perag_rele(args->pag);
	args->pag = NULL;
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
static int
xfs_bmap_btalloc_best_length(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error;

	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
	xfs_bmap_adjacent(ap);

	/*
	 * Search for an allocation group with a single extent large enough for
	 * the request.  If one isn't found, then adjust the minimum allocation
	 * size to the largest space found.
	 */
	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
	if (error)
		return error;

	/*
	 * Don't attempt optimal EOF allocation if previous allocations barely
	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
	 * optimal or even aligned allocations in this case, so don't waste time
	 * trying.
	 */
	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				false);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}
static int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = {
		.tp		= ap->tp,
		.mp		= mp,
		.fsbno		= NULLFSBLOCK,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minleft	= ap->minleft,
		.wasdel		= ap->wasdel,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= ap->datatype,
		.alignment	= 1,
		.minalignslop	= 0,
	};
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;
	int			stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	stripe_align = xfs_bmap_compute_alignments(ap, &args);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);

	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
	    xfs_inode_is_filestream(ap->ip))
		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
	else
		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
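/*
 * Illustrative example for xfs_trim_extent() (hypothetical mapping):
 */
#if 0
	struct xfs_bmbt_irec	irec = {
		.br_startoff	= 100,
		.br_startblock	= 5000,
		.br_blockcount	= 50,
	};

	/* keep only the part of [100, 150) that overlaps [120, 140) */
	xfs_trim_extent(&irec, 120, 20);
	/* result: br_startoff 120, br_startblock 5020, br_blockcount 20 */
#endif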
/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	uint32_t		flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for, for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
}
/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	uint32_t		flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   mval[-1].br_state == mval->br_state) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	if (WARN_ON_ONCE(!ifp)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole.  */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
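/*
 * Illustrative caller sketch for xfs_bmapi_read() (hypothetical
 * variables; the inode reference and the fsb range come from the
 * caller):
 */
#if 0
	struct xfs_bmbt_irec	map;
	int			nmaps = 1;
	int			error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &map, &nmaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	/* on success, map describes a hole, a delalloc or a real extent */
#endif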
/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	uint64_t		fdblocks;
	int			error;
	xfs_fileoff_t		aoff;
	bool			use_cowextszhint =
					whichfork == XFS_COW_FORK && !prealloc;

retry:
	error = 0;
	aoff = off;
	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/*
	 * If we're targeting the COW fork but aren't creating a speculative
	 * posteof preallocation, try to expand the reservation to align with
	 * the COW extent size hint if there's sufficient free space.
	 *
	 * Unlike the data fork, the CoW cancellation functions will free all
	 * the reservations at inactivation, so we don't require that every
	 * delalloc reservation have a dirty pagecache.
	 */
	if (use_cowextszhint) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_quota_reserve_blkres(ip, alen);
	if (error)
		goto out;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	fdblocks = indlen;
	if (XFS_IS_REALTIME_INODE(ip)) {
		error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
		if (error)
			goto out_unreserve_quota;
	} else {
		fdblocks += alen;
	}

	error = xfs_dec_fdblocks(mp, fdblocks, false);
	if (error)
		goto out_unreserve_frextents;

	ip->i_delayed_blks += alen;
	xfs_mod_delalloc(ip, alen, indlen);

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_frextents:
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_quota_unreserve_blkres(ip, alen);
out:
	if (error == -ENOSPC || error == -EDQUOT) {
		trace_xfs_delalloc_enospc(ip, off, len);

		if (prealloc || use_cowextszhint) {
			/* retry without any preallocation */
			use_cowextszhint = false;
			prealloc = 0;
			goto retry;
		}
	}
	return error;
}
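/*
 * Illustrative example of the delalloc record built above: the
 * worst-case indirect block count is encoded in br_startblock instead
 * of a real disk address (hypothetical indlen):
 */
#if 0
	got->br_startblock = nullstartblock(indlen);
	ASSERT(isnullstartblock(got->br_startblock));
	ASSERT(startblockval(got->br_startblock) == indlen);
#endif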
static int
xfs_bmap_alloc_userdata(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	int			error;

	/*
	 * Set the data type being allocated. For the data fork, the first data
	 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
	 */
	bma->datatype = XFS_ALLOC_NOBUSY;
	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
		bma->datatype |= XFS_ALLOC_USERDATA;
		if (bma->offset == 0)
			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

		if (mp->m_dalign && bma->length >= mp->m_dalign) {
			error = xfs_bmap_isaeof(bma, whichfork);
			if (error)
				return error;
		}

		if (XFS_IS_REALTIME_INODE(bma->ip))
			return xfs_bmap_rtalloc(bma);
	}

	if (unlikely(XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
		return xfs_bmap_exact_minlen_extent_alloc(bma);

	return xfs_bmap_btalloc(bma);
}
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			error;

	ASSERT(bma->length > 0);
	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (bma->flags & XFS_BMAPI_METADATA) {
		if (unlikely(XFS_TEST_ERROR(false, mp,
				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
			error = xfs_bmap_exact_minlen_extent_alloc(bma);
		else
			error = xfs_bmap_btalloc(bma);
	} else {
		error = xfs_bmap_alloc_userdata(bma);
	}
	if (error)
		return error;
	if (bma->blkno == NULLFSBLOCK)
		return -ENOSPC;

	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
		xfs_bmap_mark_sick(bma->ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur && bma->wasdel)
		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	if (bma->flags & XFS_BMAPI_PREALLOC)
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	uint32_t		flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
static xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);

	if (tp && tp->t_highest_agno != NULLAGNUMBER)
		return 0;
	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
}
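/*
 * Illustrative example for xfs_bmapi_minleft() (hypothetical btree
 * depth): a later extents-to-btree conversion or bmbt split may need
 * one new block per level, so that many blocks must stay free in the AG.
 */
#if 0
	/* if_broot->bb_level == 2: worst case needs 2 + 1 new blocks */
	bma.minleft = xfs_bmapi_minleft(tp, ip, XFS_DATA_FORK);	/* 3 */
#endif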
/*
 * Log whatever the flags say, even if error.  Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged so we
 * don't shutdown when we should.  Don't bother logging extents/btree changes if
 * we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);

	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}

/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary. Detailed behaviour is controlled by the flags
 * parameter. Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * Returns 0 on success and places the extent mappings in mval. nmaps is used
 * as an input/output parameter where the caller specifies the maximum number
 * of mappings that may be returned and xfs_bmapi_write passes back the number
 * of mappings (including existing mappings) it found.
 *
 * Returns a negative error code on failure, including -ENOSPC when it could not
 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
 * delalloc range, but those blocks were before the passed in range.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode */
	xfs_fileoff_t		bno,	/* starting file offs. mapped */
	xfs_filblks_t		len,	/* length to map in file */
	uint32_t		flags,	/* XFS_BMAPI_... */
	xfs_extlen_t		total,	/* total blocks needed */
	struct xfs_bmbt_irec	*mval,	/* output: map values */
	int			*nmap)	/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		goto error0;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool	need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes. There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
				 (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * be careful and do the min() using the larger type to
			 * avoid overflows.
			 */
			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);

			if (wasdelay) {
				bma.length = XFS_FILBLKS_MIN(bma.length,
					bma.got.br_blockcount -
					(bno - bma.got.br_startoff));
			} else {
				if (!eof)
					bma.length = XFS_FILBLKS_MIN(bma.length,
						bma.got.br_startoff - bno);
			}

			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error) {
				/*
				 * If we already allocated space in a previous
				 * iteration return what we got so far when
				 * running out of space.
				 */
				if (error == -ENOSPC && bma.nallocs)
					break;
				goto error0;
			}

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK)
				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
						bma.length);
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
				end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now. Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto error0;

	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
	xfs_bmapi_finish(&bma, whichfork, 0);
	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
		orig_nmap, n);

	/*
	 * When converting delayed allocations, xfs_bmapi_allocate ignores
	 * the passed in bno and always converts from the start of the found
	 * delalloc extent.
	 *
	 * To avoid a successful return with *nmap set to 0, return the magic
	 * -ENOSR error code for this particular case so that the caller can
	 * handle it.
	 */
	if (!n) {
		ASSERT(bma.nallocs >= *nmap);
		return -ENOSR;
	}
	*nmap = n;
	return 0;
error0:
	xfs_bmapi_finish(&bma, whichfork, error);
	return error;
}
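
/*
 * Illustrative sketch (compiled out): how a caller typically drives
 * xfs_bmapi_write(), loosely modelled on xfs_alloc_file_space(). Quota
 * reservation and rounding of the request are elided, and the function
 * name below is local to this example.
 */
#if 0
static int
xfs_bmapi_write_example(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
	int			nmap = XFS_BMAP_MAX_NMAP;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, len), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Allocate unwritten blocks backing [bno, bno + len). */
	error = xfs_bmapi_write(tp, ip, bno, len, XFS_BMAPI_PREALLOC, 0,
			map, &nmap);
	if (error) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
#endif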

/*
 * Convert an existing delalloc extent to real blocks based on file offset. This
 * attempts to allocate the entire delalloc extent and may require multiple
 * invocations to allocate the target offset if a large enough physical extent
 * is not available.
 */
static int
xfs_bmapi_convert_one_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmalloca	bma = { NULL };
	uint16_t		flags = 0;
	struct xfs_trans	*tp;
	int			error;

	if (whichfork == XFS_COW_FORK)
		flags |= IOMAP_F_SHARED;

	/*
	 * Space for the extent and indirect blocks was reserved when the
	 * delalloc extent was created so there's no need to do so here.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
				XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, whichfork,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;

	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
	    bma.got.br_startoff > offset_fsb) {
		/*
		 * No extent found in the range we are trying to convert. This
		 * should only happen for the COW fork, where another thread
		 * might have moved the extent to the data fork in the meantime.
		 */
		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
		error = -EAGAIN;
		goto out_trans_cancel;
	}

	/*
	 * If we find a real extent here we raced with another thread converting
	 * the extent. Just return the real extent at this offset.
	 */
	if (!isnullstartblock(bma.got.br_startblock)) {
		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
				xfs_iomap_inode_sequence(ip, flags));
		if (seq)
			*seq = READ_ONCE(ifp->if_seq);
		goto out_trans_cancel;
	}

	bma.tp = tp;
	bma.ip = ip;
	bma.wasdel = true;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	/*
	 * Always convert from the start of the delalloc extent even if that is
	 * outside the passed in range to create large contiguous extents on
	 * disk.
	 */
	bma.offset = bma.got.br_startoff;
	bma.length = bma.got.br_blockcount;

	/*
	 * When we're converting the delalloc reservations backing dirty pages
	 * in the page cache, we must be careful about how we create the new
	 * extents:
	 *
	 * New CoW fork extents are created unwritten, turned into real extents
	 * when we're about to write the data to disk, and mapped into the data
	 * fork after the write finishes. End of story.
	 *
	 * New data fork extents must be mapped in as unwritten and converted
	 * to real extents after the write succeeds to avoid exposing stale
	 * disk contents if we crash.
	 */
	bma.flags = XFS_BMAPI_PREALLOC;
	if (whichfork == XFS_COW_FORK)
		bma.flags |= XFS_BMAPI_COWFORK;

	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;

	error = xfs_bmapi_allocate(&bma);
	if (error)
		goto out_finish;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
	XFS_STATS_INC(mp, xs_xstrat_quick);

	ASSERT(!isnullstartblock(bma.got.br_startblock));
	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
			xfs_iomap_inode_sequence(ip, flags));
	if (seq)
		*seq = READ_ONCE(ifp->if_seq);

	if (whichfork == XFS_COW_FORK)
		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto out_finish;

	xfs_bmapi_finish(&bma, whichfork, 0);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_finish:
	xfs_bmapi_finish(&bma, whichfork, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in iomap.
 */
int
xfs_bmapi_convert_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into iomap. Allocate in a loop because it may
	 * take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
				iomap, seq);
		if (error)
			return error;
	} while (iomap->offset + iomap->length <= offset);

	return 0;
}
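
/*
 * Illustrative sketch (compiled out): a writeback-style caller converting
 * the delalloc extent backing a data fork offset. The function name below
 * is local to this example.
 */
#if 0
static int
xfs_convert_delalloc_example(
	struct xfs_inode	*ip,
	xfs_off_t		offset)
{
	struct iomap		iomap = { };
	unsigned int		seq;

	/* On success the returned iomap covers 'offset'. */
	return xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
			&iomap, &seq);
}
#endif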

int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			whichfork = xfs_bmapi_whichfork(flags);
	int			logflags = 0, error;

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
			   XFS_BMAPI_NORMAP)));
	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_nblocks += len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	if (flags & XFS_BMAPI_PREALLOC)
		got.br_state = XFS_EXT_UNWRITTEN;
	else
		got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
			&cur, &got, &logflags, flags);
	if (error)
		goto error0;
	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);

error0:
	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	return error;
}

/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents. If necessary, the caller can
 * steal available blocks from the deleted extent to make up a reservation
 * deficiency (e.g., if ores == 1); the availability and accounting of stolen
 * blocks is the responsibility of the caller.
 */
static void
xfs_bmap_split_indlen(
	xfs_filblks_t		ores,		/* original res. */
	xfs_filblks_t		*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t		*indlen2)	/* ext2 worst indlen */
{
	xfs_filblks_t		len1 = *indlen1;
	xfs_filblks_t		len2 = *indlen2;
	xfs_filblks_t		nres = len1 + len2; /* new total res. */
	xfs_filblks_t		resfactor;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that one
	 * of the two extents is left with nothing when extents are repeatedly
	 * split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent. If one of the two reservations
	 * is zero, we want to make sure that one gets a block first. The loop
	 * below starts with len1, so hand len2 a block right off the bat if it
	 * is zero.
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
}
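
/*
 * Worked example (illustrative): with ores = 10 and worst case indlens
 * *indlen1 = 8 and *indlen2 = 4, nres = 12 and resfactor = 10 * 100 / 12 = 83,
 * so len1 = 8 * 83 / 100 = 6 and len2 = 4 * 83 / 100 = 3. The remaining
 * 10 - (6 + 3) = 1 block is handed out by the loop above, for a final split
 * of 7 and 3, both strictly within their worst case bounds.
 */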

STATIC void
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	uint64_t		fdblocks;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = xfs_ifork_is_realtime(ip, whichfork);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
	ip->i_delayed_blks -= del->br_blockcount;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two new
		 * extents. Steal blocks from the deleted extent if necessary.
		 * Stealing blocks simply fudges the fdblocks accounting below.
		 * Warn if either of the new indlen reservations is zero as this
		 * can lead to delalloc problems.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);

		/*
		 * Steal as many blocks as we can to try and satisfy the worst
		 * case indlen for both new extents.
		 *
		 * However, we can't just steal reservations from the data
		 * blocks if this is an RT inode as the data and metadata
		 * blocks come from different pools. We'll have to live with
		 * under-filled indirect reservation in this case.
		 */
		da_new = got_indlen + new_indlen;
		if (da_new > da_old && !isrt) {
			stolen = XFS_FILBLKS_MIN(da_new - da_old,
						 del->br_blockcount);
			da_old += stolen;
		}
		if (da_new > da_old)
			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
		da_new = got_indlen + new_indlen;

		got->br_startblock = nullstartblock((int)got_indlen);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);

		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	fdblocks = da_diff;

	if (isrt)
		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
	else
		fdblocks += del->br_blockcount;

	xfs_add_fdblocks(mp, fdblocks);
	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
}
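
/*
 * Worked example (illustrative): punching 4 blocks out of the middle of a
 * 12-block delalloc extent leaves two 4-block delalloc extents behind. Each
 * gets a fresh worst case indlen; if their sum exceeds the original
 * reservation, up to del->br_blockcount blocks are stolen from the freed
 * range (non-realtime only) before xfs_bmap_split_indlen() distributes
 * whatever remains, and the stolen blocks are simply not returned to
 * fdblocks in the accounting above.
 */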

void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	uint32_t		state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}

/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	uint32_t		bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock = 0; /* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			error = 0;	/* error return value */
	struct xfs_bmbt_irec	got;		/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;		/* temp state */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_filblks_t		nblks;		/* quota/sb block count */
	xfs_bmbt_irec_t		new;		/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;		/* quota field to update */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;

	/*
	 * If it's the case where the directory code is running with no block
	 * reservation, and the deleted block is in the middle of its extent,
	 * and the resulting insert of an extent would cause transformation to
	 * btree format, then reject it. The calling code will then swap blocks
	 * around instead. We have to do this now, rather than waiting for the
	 * conversion to btree format, since the transaction will be dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	*logflagsp = XFS_ILOG_CORE;
	if (xfs_ifork_is_realtime(ip, whichfork))
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	else
		qfield = XFS_TRANS_DQ_BCOUNT;
	nblks = del->br_blockcount;

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		*logflagsp |= XFS_ILOG_CORE;
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		ifp->if_nextents++;
		*logflagsp |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				return error;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				return error;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				return error;
			/*
			 * If we get no-space back from the btree insert, it
			 * tried a split, and we have a zero block reservation.
			 * Fix up our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					return error;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					xfs_btree_mark_sick(cur);
					return -EFSCORRUPTED;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					return error;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				*logflagsp = 0;
				return -ENOSPC;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				return -EFSCORRUPTED;
			}
		} else
			*logflagsp |= xfs_ilog_fext(whichfork);

		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (!(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			xfs_refcount_decrease_extent(tp, del);
		} else if (xfs_ifork_is_realtime(ip, whichfork)) {
			/*
			 * Ensure the bitmap and summary inodes are locked
			 * and joined to the transaction before modifying them.
			 */
			if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
				tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
				xfs_rtbitmap_lock(tp, mp);
			}
			error = xfs_rtfree_blocks(tp, del->br_startblock,
					del->br_blockcount);
		} else {
			error = xfs_free_extent_later(tp, del->br_startblock,
					del->br_blockcount, NULL,
					XFS_AG_RESV_NONE,
					((bflags & XFS_BMAPI_NODISCARD) ||
					del->br_state == XFS_EXT_UNWRITTEN));
		}
		if (error)
			return error;
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	ip->i_nblocks -= nblks;

	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	return 0;
}

/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value. If not all extents in the block range can be removed then
 * *rlen reports the remaining length.
 */
static int
__xfs_bunmapi(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode */
	xfs_fileoff_t		start,	/* first file offset deleted */
	xfs_filblks_t		*rlen,	/* i/o: amount remaining */
	uint32_t		flags,	/* misc flags */
	xfs_extnum_t		nexts)	/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}
	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = xfs_ifork_is_realtime(ip, whichfork);
	end = start + len;

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	} else
		cur = NULL;

	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete? If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		if (!isrt || (flags & XFS_BMAPI_REMAP))
			goto delete;

		mod = xfs_rtb_to_rtxoff(mp,
				del.br_startblock + del.br_blockcount);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it. But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents. Skip over it.
				 */
				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}

		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front. We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten. There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
					&del);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
			if (error)
				goto error0;
		}

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
				whichfork);
	}

error0:
	/*
	 * Log everything. Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/* Unmap a range of a file. */
int
xfs_bunmapi(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}
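
/*
 * Illustrative sketch (compiled out): punching out a file range with
 * xfs_bunmapi(), looping until *done is set since each call is bounded by
 * 'nexts'. Compare xfs_bunmapi_range() later in this file. The function
 * name below is local to this example.
 */
#if 0
static int
xfs_bunmapi_example(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	int			done = 0;
	int			error;

	do {
		error = xfs_bunmapi(*tpp, ip, bno, len, 0, 2, &done);
		if (error)
			break;
		/* Finish deferred extent frees before unmapping more. */
		error = xfs_defer_finish(tpp);
	} while (!error && !done);

	return error;
}
#endif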

/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
		return false;

	return true;
}
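
/*
 * Example (illustrative): shifting got = [startoff 100, startblock 500,
 * 10 blocks] left by shift = 4 gives a new startoff of 96. If left =
 * [startoff 90, startblock 494, 6 blocks], it ends exactly at file offset
 * 96 and disk block 500; provided both extents share the same
 * written/unwritten state, the shift can be done as a merge into 'left'.
 */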

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
5977 xfs_bmap_can_insert_extents(
5978 struct xfs_inode
*ip
,
5980 xfs_fileoff_t shift
)
5982 struct xfs_bmbt_irec got
;
5986 xfs_assert_ilocked(ip
, XFS_IOLOCK_EXCL
);
5988 if (xfs_is_shutdown(ip
->i_mount
))
5991 xfs_ilock(ip
, XFS_ILOCK_EXCL
);
5992 error
= xfs_bmap_last_extent(NULL
, ip
, XFS_DATA_FORK
, &got
, &is_empty
);
5993 if (!error
&& !is_empty
&& got
.br_startoff
>= off
&&
5994 ((got
.br_startoff
+ shift
) & BMBT_STARTOFF_MASK
) < got
.br_startoff
)
5996 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);

int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/*
 * Splits an extent into two extents at split_fsb block such that it is the
 * first block of the current_ext. @ext is a target extent to be split.
 * @split_fsb is a block where the extent is split. If split_fsb lies in a
 * hole or the first block of extents, just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new;		/* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt;	/* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Record a bmap intent. */
static void
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
	    bmap->br_startblock == HOLESTARTBLOCK ||
	    bmap->br_startblock == DELAYSTARTBLOCK)
		return;

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_defer_add(tp, bi);
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
}
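
/*
 * Illustrative sketch (compiled out): callers queue map/unmap intents and
 * let the deferred-ops machinery invoke xfs_bmap_finish_one() for each one
 * when the transaction chain is finished. A remap-style sequence, loosely
 * modelled on the reflink code, might look like this; the function name
 * below is local to this example.
 */
#if 0
static void
xfs_bmap_remap_example(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*old_map,
	struct xfs_bmbt_irec	*new_map)
{
	/* Queue removal of the old data fork mapping... */
	xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, old_map);
	/* ...and insertion of the replacement mapping. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new_map);
	/* Both intents are processed later via xfs_defer_finish(). */
}
#endif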

/*
 * Process one of the deferred bmap operations. We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	struct xfs_bmbt_irec	*bmap = &bi->bi_bmap;
	int			error = 0;
	int			flags = 0;

	if (bi->bi_whichfork == XFS_ATTR_FORK)
		flags |= XFS_BMAPI_ATTRFORK;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(bi);

	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
			flags |= XFS_BMAPI_PREALLOC;
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock,
				flags);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
				1);
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent_raw(
	struct xfs_mount	*mp,
	bool			rtfile,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (rtfile && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent_raw(ip->i_mount,
			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
}

/*
 * Used in xfs_itruncate_extents(). This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

/*
 * Unmap every extent in part of an inode's fork. We don't do any higher level
 * invalidation work at all.
 */
int
xfs_bunmapi_range(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	uint32_t		flags,
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		endoff)
{
	xfs_filblks_t		unmap_len = endoff - startoff + 1;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	while (unmap_len > 0) {
		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
				XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(tpp);
		if (error)
			goto out;
	}
out:
	return error;
}

struct xfs_bmap_query_range {
	xfs_bmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_bmap_query_range_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xfs_bmap_query_range	*query = priv;
	struct xfs_bmbt_irec		irec;
	xfs_failaddr_t			fa;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
			&irec);
	if (fa) {
		xfs_btree_mark_sick(cur);
		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
				cur->bc_ino.whichfork, fa, &irec);
	}

	return query->fn(cur, &irec, query->priv);
}

/* Find all bmaps. */
int
xfs_bmap_query_all(
	struct xfs_btree_cur	*cur,
	xfs_bmap_query_range_fn	fn,
	void			*priv)
{
	struct xfs_bmap_query_range	query = {
		.priv			= priv,
		.fn			= fn,
	};

	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}
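
/*
 * Illustrative sketch (compiled out): a minimal xfs_bmap_query_all()
 * callback that counts records, with 'priv' carrying the caller's
 * accumulator. The callback signature is assumed to match
 * xfs_bmap_query_range_fn as used by the helper above.
 */
#if 0
static int
xfs_bmap_count_recs_fn(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	void			*priv)
{
	uint64_t		*nr = priv;

	(*nr)++;
	return 0;
}
#endif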