/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota_defs.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
kmem_zone_t		*xfs_bmap_free_item_zone;
/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */
	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions.  Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
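	/*
	 * Illustrative walk-through of the loop below (example figures, not
	 * from any particular filesystem): with ~2^31 possible leaf entries
	 * and a minimum of 62 records per leaf block, the division above
	 * yields roughly 34.6 million leaf blocks; each iteration below then
	 * divides by the minimum node fanout until a single (root) block
	 * remains, and the final iteration count is the maximum btree depth.
	 */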
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
/*
 * Lookup the record equal to [off, bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * Lookup the first record greater than or equal to [off, bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
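/*
 * Note: the two lookup helpers above differ only in the search mode passed
 * to xfs_btree_lookup() - _eq positions the cursor at an exactly matching
 * record (used before an update or delete), while _ge stops at the first
 * record at or after the key.
 */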
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
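/*
 * The two predicates above use complementary thresholds: an extents-format
 * fork converts to btree only when its record count exceeds what fits in
 * the inode fork (XFS_IFORK_MAXEXT), and a btree-format fork converts back
 * to extents as soon as its records would fit inline again.
 */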
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t			/* worst case number of blocks */
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t	orig_len;

	mp = ip->i_mount;
	/* Calculate the worst-case size of the bmbt. */
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
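	/*
	 * Worked example for the loop above (illustrative numbers only):
	 * a 1000-block delayed extent with a leaf fanout of 125 needs
	 * ceil(1000/125) = 8 leaf blocks, then 1 node block at the next
	 * level (rval = 9), at which point one extra block is charged for
	 * each remaining possible level of the tree.
	 */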
	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,	/* btree block to check */
	xfs_mount_t		*mp,	/* file system mount point */
	int			root,	/* flag for root block */
	short			sz)	/* size of inline btree root */
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr or cow fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == xfs_iext_count(ifp));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, state, caller_ip);
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)	do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
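/*
 * On non-DEBUG builds the two checkers compile away to nothing, so callers
 * may invoke them unconditionally.
 */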
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,	/* mount point structure */
	struct xfs_defer_ops		*dfops,	/* list of extents */
	xfs_fsblock_t			bno,	/* fs block number of extent */
	xfs_filblks_t			len,	/* length of extent */
	struct xfs_owner_info		*oinfo)	/* extent owner */
{
	struct xfs_extent_free_item	*new;	/* new element */
#ifdef DEBUG
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
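/*
 * Note that nothing is freed here: the extent is only queued on the defer
 * list, and the space returns to the free space btrees when the deferred
 * free is processed at transaction finish time.
 */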
/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
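	/*
	 * A note on the three allocation cases above: with no previous
	 * allocation in this transaction we target the inode's own AG
	 * (START_BNO near the inode); once *firstblock is set we normally
	 * stay close to it with NEAR_BNO, falling back to a START_BNO scan
	 * only when the deferred-ops state says we are in low-space mode.
	 */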
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);
	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);
	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
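/*
 * Note for callers: the helper above only flips the in-core fork state;
 * the caller is expected to log the inode core (XFS_ILOG_CORE) itself.
 */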
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
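/*
 * An example of the init_fn callout used above is
 * xfs_symlink_local_to_remote(), which copies the inline symlink target
 * into the freshly allocated block and logs it in its remote format.
 */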
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
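/*
 * To recap the dispatch above: directories convert via the dir2
 * short-form-to-block code, symlinks use the generic local-to-extents
 * path with a symlink-specific init callout, and any other local-format
 * data fork is treated as corruption.
 */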
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */
	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
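/*
 * Typical usage (sketch): a caller about to store the first extended
 * attribute on an inode checks XFS_IFORK_Q(ip) and, if no attr fork
 * exists yet, calls
 *
 *	error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *
 * before starting its own attribute transaction.
 */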
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
		return -EFSCORRUPTED;
	ASSERT(i == xfs_iext_count(ifp));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = xfs_iext_count(ifp);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}
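/*
 * The scan above is a simple linear walk of the in-core extent list: a
 * hole is taken only if it is at least "len" blocks long and begins at or
 * after the caller's lowest acceptable offset.
 */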
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	*last_block = 0;
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = xfs_iext_count(ifp);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocating or past the last extent, or at least into
	 * the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;
	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;
	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
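	/*
	 * For orientation in the switch below: BMAP_LEFT_FILLING |
	 * BMAP_RIGHT_FILLING means the new real extent replaces the whole
	 * delayed extent; one FILLING bit alone means only that end of the
	 * delayed extent is being converted; no FILLING bit means the middle
	 * is being filled; the CONTIG bits say whether the result can be
	 * merged with the existing left/right neighbours.
	 */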
	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		(*nextents)--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_state(ep, new->br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx--;
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur, 1,
				&tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *			      new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		temp = new->br_startoff - PREV.br_startoff;
		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startblock = nullstartblock(
				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount = temp2;
		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops, &bma->cur,
					1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = xfs_bmap_worst_indlen(bma->ip, temp);
		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		if (diff > 0) {
			error = xfs_mod_fdblocks(bma->ip->i_mount,
						 -((int64_t)diff), false);
			ASSERT(!error);
			if (error)
				goto done;
		}

		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
			nullstartblock((int)temp2));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);

		bma->idx++;
		da_new = temp + temp2;
		break;
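
		/*
		 * Worked example of the reservation arithmetic above
		 * (illustrative numbers only): if PREV had 8 indirect blocks
		 * reserved and the two remaining delalloc pieces now need
		 * temp = 5 and temp2 = 5 in the worst case, diff works out
		 * to 5 + 5 - 8 = 2 blocks more than are reserved, so the
		 * extra 2 are taken from the free block counter and da_new
		 * ends up as temp + temp2 = 10.
		 */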
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur,
				da_old > 0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_old || da_new) {
		temp = da_new;
		if (bma->cur)
			temp += bma->cur->bc_private.b.allocated;
		ASSERT(temp <= da_old);
		if (temp < da_old)
			xfs_mod_fdblocks(bma->ip->i_mount,
					(int64_t)(da_old - temp), false);
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
}
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_exntst_t		newext;	/* new extent state */
	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	struct xfs_mount	*mp = ip->i_mount;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	newext = new->br_state;
	oldext = (newext == XFS_EXT_UNWRITTEN) ?
		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
	ASSERT(PREV.br_state == oldext);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == newext &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    newext == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
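	/*
	 * The four flag bits combine into a 4-bit case value here.  For
	 * example, converting only the first part of PREV while merging
	 * into a contiguous left neighbor selects
	 * (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG), while converting all of
	 * PREV with both neighbors mergeable selects all four bits at once.
	 */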
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 2, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount,
				LEFT.br_state)))
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock, new->br_blockcount,
				newext)))
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + new->br_blockcount,
				LEFT.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
		xfs_bmbt_set_startoff(ep, new_endoff);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_insert(ip, *idx, 1, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock,
					PREV.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto done;
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, new, state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			new->br_startoff - PREV.br_startoff);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = oldext;

		++*idx;
		xfs_iext_insert(ip, *idx, 2, &r[0], state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/* new right extent - oldext */
			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
				r[1].br_startblock, r[1].br_blockcount,
				r[1].br_state)))
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			cur->bc_rec.b.br_blockcount =
				new->br_startoff - PREV.br_startoff;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			/* new middle extent - newext */
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
	/* update reverse mappings */
	error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
				0, &tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	state = 0;
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;
	ASSERT(isnullstartblock(new->br_startblock));
	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
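
/*
 * Example of the accounting above (illustrative numbers only): merging a
 * new delalloc extent with both neighbors can shrink the combined
 * worst-case indirect reservation, say from oldlen = 3 + 2 + 2 = 7 blocks
 * down to newlen = 5, in which case the surplus 2 blocks are handed back
 * to the free block counter via xfs_mod_fdblocks().
 */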
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*idx,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	xfs_fsblock_t		*first,
	struct xfs_defer_ops	*dfops,
	int			*logflagsp)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;
	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (*idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
		     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
					right.br_startblock, right.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
					left.br_startblock, left.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state);
			if (error)
				goto done;
		}
		break;
	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, *idx, 1, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			cur->bc_rec.b.br_state = new->br_state;
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	}
	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
				0, &tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
	 * the length back under MAXEXTLEN. The outer allocation loops handle
	 * short allocation just fine, so it is safe to do this. We only want to
	 * do it when we are forced to, though, because it means more allocation
	 * operations are required.
	 */
	while (align_alen > MAXEXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= MAXEXTLEN);
	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}
	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see MAXEXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > MAXEXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
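
/*
 * Worked example (illustrative numbers only): with extsz = 16, a request
 * of *offp = 5, *lenp = 10 has temp = do_mod(5, 16) = 5, so align_off
 * drops to 0 and align_alen grows to 15; the end is then rounded up by
 * extsz - (align_alen % extsz) = 1, giving a 16-block allocation at
 * offset 0, assuming no neighbor overlap forces the start or length to be
 * trimmed again.
 */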
#define XFS_ALLOC_GAP_UNITS	4

void
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
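
	/*
	 * ISVALID checks that a candidate block number is addressable: on
	 * the realtime device it only needs to lie below sb_rblocks, while
	 * on the data device it must stay in the same AG as the block it
	 * was derived from (y) and within the AG count and size limits,
	 * since a single allocation never crosses an AG boundary.
	 */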
	mp = ap->ip->i_mount;
	nullfb = *ap->firstblock == NULLFSBLOCK;
	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
		xfs_alloc_is_userdata(ap->datatype);
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
		    ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	else if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    ISVALID(prevbno, ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(prevbno + prevdiff,
				    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
				prevbno = NULLFSBLOCK;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
				gotbno = NULLFSBLOCK;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
		else if (prevbno != NULLFSBLOCK)
			ap->blkno = prevbno;
		else if (gotbno != NULLFSBLOCK)
			ap->blkno = gotbno;
	}
#undef ISVALID
}
static int
xfs_bmap_longest_free_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		ag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest;
	int			error = 0;

	pag = xfs_perag_get(mp, ag);
	if (!pag->pagf_init) {
		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
		if (error)
			goto out;

		if (!pag->pagf_init) {
			*notinit = 1;
			goto out;
		}
	}

	longest = xfs_alloc_longest_free_extent(mp, pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

out:
	xfs_perag_put(pag);
	return error;
}
static void
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen,
	int			notinit)
{
	if (notinit || *blen < ap->minlen) {
		/*
		 * Since we did a BUF_TRYLOCK above, it is possible that
		 * there is space for this request.
		 */
		args->minlen = ap->minlen;
	} else if (*blen < args->maxlen) {
		/*
		 * If the best seen length is less than the request length,
		 * use the best as the minimum.
		 */
		args->minlen = *blen;
	} else {
		/*
		 * Otherwise we've seen an extent as big as maxlen, use that
		 * as the minimum.
		 */
		args->minlen = args->maxlen;
	}
}
STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;

	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	while (*blen < args->maxlen) {
		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return 0;
}
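
/*
 * Note the scan above wraps: starting from the AG of args->fsbno it visits
 * each AG at most once (wrapping at sb_agcount back to AG 0) and stops
 * early only when a free extent of at least args->maxlen has been seen.
 */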
STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_NEAR_BNO;
	args->total = ap->total;

	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (ag == NULLAGNUMBER)
		ag = 0;

	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
	if (error)
		return error;

	if (*blen < args->maxlen) {
		error = xfs_filestream_new_ag(ap, &ag);
		if (error)
			return error;

		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
	return 0;
}
STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);

	mp = ap->ip->i_mount;

	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (xfs_alloc_is_userdata(ap->datatype))
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}

	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;

	xfs_bmap_adjacent(ap);
	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
	xfs_rmap_skip_owner_update(&args.oinfo);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
		 * enough for the request.  If one isn't found, then adjust
		 * the minimum allocation size to the largest space found.
		 */
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->dfops->dop_low) {
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (align) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->dfops->dop_low && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > stripe_align && blen <= args.maxlen)
				nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
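
	/*
	 * Worked example of the slop computation above (illustrative
	 * numbers only): with stripe_align = 8, args.minlen = 4 and
	 * nextminlen = 4, minalignslop becomes 4 + 8 - 4 - 1 = 7, so
	 * minlen + alignment + slop is 4 + 1 + 7 = 12 for the exact-bno
	 * attempt and 4 + 8 + 0 = 12 for the aligned retry, keeping the
	 * space needed from going up between the two calls.
	 */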
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;
	if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
		args.ip = ap->ip;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = stripe_align;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->dfops->dop_low = true;
	}
.fsbno
!= NULLFSBLOCK
) {
3788 * check the allocation happened at the same or higher AG than
3789 * the first block that was allocated.
3791 ASSERT(*ap
->firstblock
== NULLFSBLOCK
||
3792 XFS_FSB_TO_AGNO(mp
, *ap
->firstblock
) <=
3793 XFS_FSB_TO_AGNO(mp
, args
.fsbno
));
3795 ap
->blkno
= args
.fsbno
;
3796 if (*ap
->firstblock
== NULLFSBLOCK
)
3797 *ap
->firstblock
= args
.fsbno
;
3798 ASSERT(nullfb
|| fb_agno
<= args
.agno
);
3799 ap
->length
= args
.len
;
3800 if (!(ap
->flags
& XFS_BMAPI_COWFORK
))
3801 ap
->ip
->i_d
.di_nblocks
+= args
.len
;
3802 xfs_trans_log_inode(ap
->tp
, ap
->ip
, XFS_ILOG_CORE
);
3804 ap
->ip
->i_delayed_blks
-= args
.len
;
3806 * Adjust the disk quota also. This was reserved
3809 xfs_trans_mod_dquot_byino(ap
->tp
, ap
->ip
,
3810 ap
->wasdel
? XFS_TRANS_DQ_DELBCOUNT
:
3811 XFS_TRANS_DQ_BCOUNT
,
3814 ap
->blkno
= NULLFSBLOCK
;
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int
xfs_bmap_alloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	if (XFS_IS_REALTIME_INODE(ap->ip) &&
	    xfs_alloc_is_userdata(ap->datatype))
		return xfs_bmap_rtalloc(ap);
	return xfs_bmap_btalloc(ap);
}
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
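
/*
 * Example: trimming a record covering blocks [10, 30) to the range
 * bno = 15, len = 10 moves br_startoff up by 5 (advancing br_startblock
 * by the same distance for a real extent) and clips br_blockcount to 10,
 * so the result covers exactly [15, 25).
 */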
/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	int			flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
}
/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   ((flags & XFS_BMAPI_IGSTATE) ||
			mval[-1].br_state == mval->br_state)) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	xfs_extnum_t		idx;
	int			error;
	bool			eof = false;
	int			n = 0;
	int			whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* No CoW fork?  Return a hole. */
	if (whichfork == XFS_COW_FORK && !ifp) {
		mval->br_startoff = bno;
		mval->br_startblock = HOLESTARTBLOCK;
		mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_get_extent(ifp, ++idx, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
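
/*
 * A minimal usage sketch (hypothetical caller, not from this file): to map
 * the first 8 blocks of a file read-only, a caller might do
 *
 *	struct xfs_bmbt_irec	map[4];
 *	int			nmap = 4;
 *
 *	error = xfs_bmapi_read(ip, 0, 8, map, &nmap, 0);
 *
 * after which nmap holds how many entries were filled, with holes reported
 * as HOLESTARTBLOCK and delalloc ranges as DELAYSTARTBLOCK.
 */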
/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	xfs_extnum_t		*lastx,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	char			rt = XFS_IS_REALTIME_INODE(ip);
	xfs_extlen_t		extsz;
	int			error;
	xfs_fileoff_t		aoff = off;

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK)
		extsz = xfs_get_cowextsz_hint(ip);
	else
		extsz = xfs_get_extsz_hint(ip);
	if (extsz) {
		struct xfs_bmbt_irec	prev;

		if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	if (rt)
		extsz = alen / mp->m_sb.sb_rextsize;

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks. This number gets adjusted later. We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
			rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	if (rt)
		error = xfs_mod_frextents(mp, -((int64_t)extsz));
	else
		error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;

	ip->i_delayed_blks += alen;

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_blocks:
	if (rt)
		xfs_mod_frextents(mp, extsz);
	else
		xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	return error;
}
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	ASSERT(bma->length > 0);

	/*
	 * For the wasdelay case, we could also just allocate the stuff asked
	 * for in this bmap call but that wouldn't be as good.
	 */
	if (bma->wasdel) {
		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
		bma->offset = bma->got.br_startoff;
		if (bma->idx) {
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
					 &bma->prev);
		}
	} else {
		bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
		if (!bma->eof)
			bma->length = XFS_FILBLKS_MIN(bma->length,
					bma->got.br_startoff - bma->offset);
	}

	/*
	 * Set the data type being allocated. For the data fork, the first data
	 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
	 */
	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK) {
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
			else
				bma->datatype |= XFS_ALLOC_USERDATA;
		}
		if (bma->flags & XFS_BMAPI_ZERO)
			bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
	}

	bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;

	/*
	 * Only want to do the alignment at the eof if it is userdata and
	 * allocation length is larger than a stripe unit.
	 */
	if (mp->m_dalign && bma->length >= mp->m_dalign &&
	    !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
		error = xfs_bmap_isaeof(bma, whichfork);
		if (error)
			return error;
	}

	error = xfs_bmap_alloc(bma);
	if (error)
		return error;

	if (bma->cur)
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
	if (bma->blkno == NULLFSBLOCK)
		return 0;
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur)
		bma->cur->bc_private.b.flags =
			bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	/*
	 * In the data fork, a wasdelay extent has been initialized, so
	 * shouldn't be flagged as unwritten.
	 *
	 * For the cow fork, however, we convert delalloc reservations
	 * (extents allocated for speculative preallocation) to
	 * allocated unwritten extents, and only convert the unwritten
	 * extents to real extents when we're about to write the data.
	 */
	if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
	    (bma->flags & XFS_BMAPI_PREALLOC) &&
	    xfs_sb_version_hasextflgbit(&mp->m_sb))
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->idx, &bma->cur, &bma->got,
				bma->firstblock, bma->dfops, &bma->logflags);

	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
			&tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary. Detailed behaviour is controlled by the flags
 * parameter. Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * The returned value in "firstblock" from the first call in a transaction
 * must be remembered and presented to subsequent calls in "firstblock".
 * An upper bound for the number of blocks to be allocated is supplied to
 * the first call in "total"; if no allocation group has that many free
 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_defer_ops	*dfops)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */
#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(tp != NULL ||
	       (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
			(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (*firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	n = 0;
	end = bno + len;
	obno = bno;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
		eof = true;
	if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.datatype = 0;
	bma.dfops = dfops;
	bma.firstblock = firstblock;

	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			if (flags & XFS_BMAPI_DELALLOC) {
				/*
				 * For the COW fork we can reasonably get a
				 * request for converting an extent that races
				 * with other threads already having converted
				 * part of it, since converting COW blocks to
				 * regular blocks is not protected by the
				 * IOLOCK.
				 */
				ASSERT(flags & XFS_BMAPI_COWFORK);
				if (!(flags & XFS_BMAPI_COWFORK)) {
					error = -EIO;
					goto error0;
				}

				if (eof || bno >= end)
					break;
			} else {
				need_alloc = true;
			}
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			if (bma.blkno == NULLFSBLOCK)
				break;

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK) {
				error = xfs_refcount_alloc_cow_extent(mp, dfops,
						bma.blkno, bma.length);
				if (error)
					goto error0;
			}
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now. Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
			eof = true;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything. Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error. Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) <=
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
static int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	struct xfs_btree_cur	*cur = NULL;
	xfs_fsblock_t		firstblock = NULLFSBLOCK;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			logflags = 0, error;

	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_d.di_nblocks += len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.firstblock = firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
			&got, &firstblock, dfops, &logflags);
	if (error)
		goto error0;

	if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
		int		tmp_logflags = 0;

		error = xfs_bmap_btree_to_extents(tp, ip, cur,
			&tmp_logflags, XFS_DATA_FORK);
		logflags |= tmp_logflags;
	}

error0:
	if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents. If necessary, steal available
 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
 * ores == 1). The number of stolen blocks is returned. The availability and
 * subsequent accounting of stolen blocks is the responsibility of the caller.
 */
static xfs_filblks_t
xfs_bmap_split_indlen(
	xfs_filblks_t			ores,		/* original res. */
	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
	xfs_filblks_t			avail)		/* stealable blocks */
{
	xfs_filblks_t			len1 = *indlen1;
	xfs_filblks_t			len2 = *indlen2;
	xfs_filblks_t			nres = len1 + len2; /* new total res. */
	xfs_filblks_t			stolen = 0;
	xfs_filblks_t			resfactor;

	/*
	 * Steal as many blocks as we can to try and satisfy the worst case
	 * indlen for both new extents.
	 */
	if (ores < nres && avail)
		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
	ores += stolen;

	/* nothing else to do if we've satisfied the new reservation */
	if (ores >= nres)
		return stolen;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that one
	 * of the two extents is left with nothing when extents are repeatedly
	 * split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent. If one of the two reservations
	 * is zero, we want to make sure that one gets a block first. The loop
	 * below starts with len1, so hand len2 a block right off the bat if it
	 * is zero.
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;

	return stolen;
}
int
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*idx,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen;
	int			error = 0, state = 0;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	if (isrt) {
		uint64_t	rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);

		do_div(rtexts, mp->m_sb.sb_rextsize);
		xfs_mod_frextents(mp, rtexts);
	}

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip,
			-((long)del->br_blockcount), 0,
			isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;
	ip->i_delayed_blks -= del->br_blockcount;

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_CONTIG;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_CONTIG;

	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1, state);
		--*idx;
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * Deleting the last part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two new
		 * extents. Steal blocks from the deleted extent if necessary.
		 * Stealing blocks simply fudges the fdblocks accounting below.
		 * Warn if either of the new indlen reservations is zero as this
		 * can lead to delalloc problems.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);

		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);
		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
						       del->br_blockcount);

		got->br_startblock = nullstartblock((int)got_indlen);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, &new, state);

		da_new = got_indlen + new_indlen - stolen;
		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	if (!isrt)
		da_diff += del->br_blockcount;
	if (da_diff)
		xfs_mod_fdblocks(mp, da_diff, false);
	return error;
}
void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	xfs_extnum_t		*idx,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	int			state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_CONTIG;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_CONTIG;

	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1, state);
		--*idx;
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * Deleting the last part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount -= del->br_blockcount;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount = del->br_startoff - got->br_startoff;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		++*idx;
		xfs_iext_insert(ip, *idx, 1, &new, state);
		break;
	}
}
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	int			bflags)	/* bmapi flags */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
	ASSERT(del->br_blockcount > 0);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	delay = isnullstartblock(got.br_startblock);
	ASSERT(isnullstartblock(del->br_startblock) == delay);
	flags = 0;
	qfield = 0;
	error = 0;
	/*
	 * If deleting a real allocation, must free up the disk space.
	 */
	if (!delay) {
		flags = XFS_ILOG_CORE;
		/*
		 * Realtime allocation. Free it and record di_nblocks update.
		 */
		if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
			xfs_fsblock_t	bno;
			xfs_filblks_t	len;

			ASSERT(do_mod(del->br_blockcount,
				      mp->m_sb.sb_rextsize) == 0);
			ASSERT(do_mod(del->br_startblock,
				      mp->m_sb.sb_rextsize) == 0);
			bno = del->br_startblock;
			len = del->br_blockcount;
			do_div(bno, mp->m_sb.sb_rextsize);
			do_div(len, mp->m_sb.sb_rextsize);
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
			do_fx = 0;
			nblks = len * mp->m_sb.sb_rextsize;
			qfield = XFS_TRANS_DQ_RTBCOUNT;
		}
		/*
		 * Ordinary allocation.
		 */
		else {
			do_fx = 1;
			nblks = del->br_blockcount;
			qfield = XFS_TRANS_DQ_BCOUNT;
		}
		/*
		 * Set up del_endblock and cur for later.
		 */
		del_endblock = del->br_startblock + del->br_blockcount;
		if (cur) {
			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
					got.br_startblock, got.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		da_old = da_new = 0;
	} else {
		da_old = startblockval(got.br_startblock);
		da_new = 0;
		nblks = 0;
		do_fx = 0;
	}

	/*
	 * Set flag value to use in switch statement.
	 * Left-contig is 2, right-contig is 1.
	 */
	switch (((got.br_startoff == del->br_startoff) << 1) |
		(got_endoff == del_endoff)) {
	case 3:
		/*
		 * Matches the whole extent. Delete the entry.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx, 1,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
		--*idx;
		if (delay)
			break;

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 2:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, del_endoff);
		temp = got.br_blockcount - del->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		xfs_bmbt_set_startblock(ep, del_endblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 1:
		/*
		 * Deleting the last part of the extent.
		 */
		temp = got.br_blockcount - del->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		temp = del->br_startoff - got.br_startoff;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		new.br_startoff = del_endoff;
		temp2 = got_endoff - del_endoff;
		new.br_blockcount = temp2;
		new.br_state = got.br_state;
		if (!delay) {
			new.br_startblock = del_endblock;
			flags |= XFS_ILOG_CORE;
			if (cur) {
				if ((error = xfs_bmbt_update(cur,
						got.br_startoff,
						got.br_startblock, temp,
						got.br_state)))
					goto done;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto done;
				cur->bc_rec.b = new;
				error = xfs_btree_insert(cur, &i);
				if (error && error != -ENOSPC)
					goto done;
				/*
				 * If get no-space back from btree insert,
				 * it tried a split, and we have a zero
				 * block reservation.
				 * Fix up our state and return the error.
				 */
				if (error == -ENOSPC) {
					/*
					 * Reset the cursor, don't trust
					 * it after any insert operation.
					 */
					if ((error = xfs_bmbt_lookup_eq(cur,
							got.br_startoff,
							got.br_startblock,
							temp, &i)))
						goto done;
					XFS_WANT_CORRUPTED_GOTO(mp,
								i == 1, done);
					/*
					 * Update the btree record back
					 * to the original value.
					 */
					if ((error = xfs_bmbt_update(cur,
							got.br_startoff,
							got.br_startblock,
							got.br_blockcount,
							got.br_state)))
						goto done;
					/*
					 * Reset the extent record back
					 * to the original value.
					 */
					xfs_bmbt_set_blockcount(ep,
						got.br_blockcount);
					flags = 0;
					error = -ENOSPC;
					goto done;
				}
				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			} else
				flags |= xfs_ilog_fext(whichfork);
			XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		} else {
			xfs_filblks_t	stolen;
			ASSERT(whichfork == XFS_DATA_FORK);

			/*
			 * Distribute the original indlen reservation across the
			 * two new extents. Steal blocks from the deleted extent
			 * if necessary. Stealing blocks simply fudges the
			 * fdblocks accounting in xfs_bunmapi().
			 */
			temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
			temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
			stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
						       del->br_blockcount);
			da_new = temp + temp2 - stolen;
			del->br_blockcount -= stolen;

			/*
			 * Set the reservation for each extent. Warn if either
			 * is zero as this can lead to delalloc problems.
			 */
			WARN_ON_ONCE(!temp || !temp2);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			new.br_startblock = nullstartblock((int)temp2);
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_insert(ip, *idx + 1, 1, &new, state);
		++*idx;
		break;
	}

	/* remove reverse mapping */
	if (!delay) {
		error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
		if (error)
			goto done;
	}

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			error = xfs_refcount_decrease_extent(mp, dfops, del);
			if (error)
				goto done;
		} else
			xfs_bmap_add_free(mp, dfops, del->br_startblock,
					del->br_blockcount, NULL);
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_d.di_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	/*
	 * Account for change in delayed indirect blocks.
	 * Nothing to do for disk quota accounting here.
	 */
	ASSERT(da_old >= da_new);
	if (da_old > da_new)
		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
	*logflagsp = flags;
	return error;
}
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value. If not all extents in the block range can be removed then
 * *rlen is updated to the length remaining.
 */
int						/* error */
__xfs_bunmapi(
	xfs_trans_t		*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting offset to unmap */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts,		/* number of extents max */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	struct xfs_defer_ops	*dfops)		/* i/o: deferred updates */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_irec_t		del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	xfs_bmbt_irec_t		got;		/* current extent record */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	xfs_extnum_t		lastx;		/* last extent index used */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_fileoff_t		start;		/* first file offset deleted */
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */

	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (unlikely(
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return -EFSCORRUPTED;
	}
	mp = ip->i_mount;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(len > 0);
	ASSERT(nexts >= 0);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	start = bno;
	bno = start + len - 1;

	/*
	 * Check to see if the given block number is past the end of the
	 * file, back up to the last block if so...
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
		ASSERT(lastx > 0);
		xfs_iext_get_extent(ifp, --lastx, &got);
		bno = got.br_startoff + got.br_blockcount - 1;
	}

	logflags = 0;
	if (ifp->if_flags & XFS_IFBROOT) {
		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	extno = 0;
	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which bno lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > bno) {
			if (--lastx < 0)
				break;
			xfs_iext_get_extent(ifp, lastx, &got);
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete? If so, we're done.
		 */
		bno = XFS_FILEOFF_MIN(bno,
			got.br_startoff + got.br_blockcount - 1);
		if (bno < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);
		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > bno + 1)
			del.br_blockcount = bno + 1 - del.br_startoff;
		sum = del.br_startblock + del.br_blockcount;
		if (isrt &&
		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it. But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN ||
			    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents. Skip over it.
				 */
				ASSERT(bno >= mod);
				bno -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (bno < got.br_startoff) {
					if (--lastx >= 0)
						xfs_bmbt_get_all(xfs_iext_get_ext(
							ifp, lastx), &got);
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &lastx, &cur, &del,
					firstblock, dfops, &logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent is lined up at the end but not
			 * at the front. We'll get rid of full extents if
			 * we can.
			 */
			mod = mp->m_sb.sb_rextsize - mod;
			if (del.br_blockcount > mod) {
				del.br_blockcount -= mod;
				del.br_startoff += mod;
				del.br_startblock += mod;
			} else if ((del.br_startoff == start &&
				    (del.br_state == XFS_EXT_UNWRITTEN ||
				     tp->t_blk_res == 0)) ||
				   !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * Can't make it unwritten. There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(bno >= del.br_blockcount);
				bno -= del.br_blockcount;
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				ASSERT(lastx > 0);
				xfs_iext_get_extent(ifp, lastx - 1, &prev);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				if (prev.br_startoff < start) {
					mod = start - prev.br_startoff;
					prev.br_blockcount -= mod;
					prev.br_startblock += mod;
					prev.br_startoff = start;
				}
				prev.br_state = XFS_EXT_UNWRITTEN;
				lastx--;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &lastx, &cur,
						&prev, firstblock, dfops,
						&logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &lastx, &cur,
						&del, firstblock, dfops,
						&logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

		/*
		 * If it's the case where the directory code is running
		 * with no block reservation, and the deleted block is in
		 * the middle of its extent, and the resulting insert
		 * of an extent would cause transformation to btree format,
		 * then reject it. The calling code will then swap
		 * blocks around instead.
		 * We have to do this now, rather than waiting for the
		 * conversion to btree format, since the transaction
		 * will be dirty.
		 */
		if (!wasdel && tp->t_blk_res == 0 &&
		    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		    XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
			XFS_IFORK_MAXEXT(ip, whichfork) &&
		    del.br_startoff > got.br_startoff &&
		    del.br_startoff + del.br_blockcount <
		    got.br_startoff + got.br_blockcount) {
			error = -ENOSPC;
			goto error0;
		}

		/*
		 * Unreserve quota and update realtime free space, if
		 * appropriate. If delayed allocation, update the inode delalloc
		 * counter now and wait to update the sb counters as
		 * xfs_bmap_del_extent() might need to borrow some blocks.
		 */
		if (wasdel) {
			ASSERT(startblockval(del.br_startblock) > 0);
			if (isrt) {
				xfs_filblks_t rtexts;

				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
				do_div(rtexts, mp->m_sb.sb_rextsize);
				xfs_mod_frextents(mp, (int64_t)rtexts);
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_RTBLKS);
			} else {
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_REGBLKS);
			}
			ip->i_delayed_blks -= del.br_blockcount;
			if (cur)
				cur->bc_private.b.flags |=
					XFS_BTCUR_BPRV_WASDEL;
		} else if (cur)
			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;

		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
				&tmp_logflags, whichfork, flags);
		logflags |= tmp_logflags;
		if (error)
			goto error0;

		if (!isrt && wasdel)
			xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);

		bno = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
			if (lastx >= 0) {
				xfs_iext_get_extent(ifp, lastx, &got);
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
			}
			extno++;
		}
	}
	if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
		*rlen = 0;
	else
		*rlen = bno - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
			&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from btree to extents, give it cur
	 */
	else if (xfs_bmap_wants_extents(ip, whichfork)) {
		ASSERT(cur != NULL);
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
			whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from extents to local?
	 */
	error = 0;
error0:
	/*
	 * Log everything. Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error) {
			*firstblock = cur->bc_private.b.firstblock;
			cur->bc_private.b.allocated = 0;
		}
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/* Unmap a range of a file. */
int
xfs_bunmapi(
	xfs_trans_t		*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_extnum_t		nexts,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
			dfops);
	*done = (len == 0);
	return error;
}
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
		return false;

	return true;
}
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	int				current_ext,	/* idx of gotp */
	struct xfs_bmbt_rec_host	*gotp,		/* extent to shift */
	struct xfs_bmbt_rec_host	*leftp,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		left;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	xfs_bmbt_get_all(gotp, &got);
	xfs_bmbt_get_all(leftp, &left);
	blockcount = left.br_blockcount + got.br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(&left, &got, shift));

	/*
	 * Merge the in-core extents. Note that the host record pointers and
	 * current_ext index are invalid once the extent has been removed via
	 * xfs_iext_remove().
	 */
	xfs_bmbt_set_blockcount(leftp, blockcount);
	xfs_iext_remove(ip, current_ext, 1, 0);

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		return 0;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
				   left.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	left.br_blockcount = blockcount;

	return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
			       left.br_blockcount, left.br_state);
}
/*
 * Shift a single extent.
 */
STATIC int
xfs_bmse_shift_one(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			offset_shift_fsb,
	int				*current_ext,
	struct xfs_bmbt_rec_host	*gotp,
	struct xfs_btree_cur		*cur,
	int				*logflags,
	enum shift_direction		direction,
	struct xfs_defer_ops		*dfops)
{
	struct xfs_ifork		*ifp;
	struct xfs_mount		*mp;
	xfs_fileoff_t			startoff;
	struct xfs_bmbt_rec_host	*adj_irecp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		adj_irec;
	int				error;
	int				i;
	int				total_extents;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	total_extents = xfs_iext_count(ifp);

	xfs_bmbt_get_all(gotp, &got);

	/* delalloc extents should be prevented by caller */
	XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

	if (direction == SHIFT_LEFT) {
		startoff = got.br_startoff - offset_shift_fsb;

		/*
		 * Check for merge if we've got an extent to the left,
		 * otherwise make sure there's enough room at the start
		 * of the file for the shift.
		 */
		if (!*current_ext) {
			if (got.br_startoff < offset_shift_fsb)
				return -EINVAL;
			goto update_current_ext;
		}
		/*
		 * grab the left extent and check for a large
		 * enough hole.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);

		if (startoff <
		    adj_irec.br_startoff + adj_irec.br_blockcount)
			return -EINVAL;

		/* check whether to merge the extent or shift it down */
		if (xfs_bmse_can_merge(&adj_irec, &got,
				       offset_shift_fsb)) {
			error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
					       *current_ext, gotp, adj_irecp,
					       cur, logflags);
			if (error)
				return error;
			adj_irec = got;
			goto update_rmap;
		}
	} else {
		startoff = got.br_startoff + offset_shift_fsb;
		/* nothing to move if this is the last extent */
		if (*current_ext >= (total_extents - 1))
			goto update_current_ext;
		/*
		 * If this is not the last extent in the file, make sure there
		 * is enough room between current extent and next extent for
		 * accommodating the shift.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);
		if (startoff + got.br_blockcount > adj_irec.br_startoff)
			return -EINVAL;
		/*
		 * Unlike a left shift (which involves a hole punch),
		 * a right shift does not modify extent neighbors
		 * in any way. We should never find mergeable extents
		 * in this scenario. Check anyways and warn if we
		 * encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}
	/*
	 * Increment the extent index for the next iteration, update the start
	 * offset of the in-core extent and update the btree if applicable.
	 */
update_current_ext:
	if (direction == SHIFT_LEFT)
		(*current_ext)++;
	else
		(*current_ext)--;
	xfs_bmbt_set_startoff(gotp, startoff);
	*logflags |= XFS_ILOG_CORE;
	adj_irec = got;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto update_rmap;
	}

	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	got.br_startoff = startoff;
	error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
			got.br_blockcount, got.br_state);
	if (error)
		return error;

update_rmap:
	/* update reverse mapping */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
	if (error)
		return error;
	adj_irec.br_startoff = startoff;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
/*
 * Shift extent records to the left/right to cover/create a hole.
 *
 * The maximum number of extents to be shifted in a single operation is
 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
 * is the length by which each extent is shifted. If there is no hole to shift
 * the extents into, the operation is considered invalid and we abort
 * immediately.
 */
int
xfs_bmap_shift_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	int			*done,
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	enum shift_direction	direction,
	int			num_exts)
{
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_extnum_t			nexts = 0;
	xfs_extnum_t			current_ext;
	xfs_extnum_t			total_extents;
	xfs_extnum_t			stop_extent;
	int				error = 0;
	int				whichfork = XFS_DATA_FORK;
	int				logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_shift_extents",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
	ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	/*
	 * There may be delalloc extents in the data fork before the range we
	 * are collapsing out, so we cannot use the count of real extents here.
	 * Instead we have to calculate it from the incore fork.
	 */
	total_extents = xfs_iext_count(ifp);
	if (total_extents == 0) {
		*done = 1;
		goto del_cursor;
	}

	/*
	 * In case of first right shift, we need to initialize next_fsb
	 */
	if (*next_fsb == NULLFSBLOCK) {
		gotp = xfs_iext_get_ext(ifp, total_extents - 1);
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
		if (stop_fsb > *next_fsb) {
			*done = 1;
			goto del_cursor;
		}
	}

	/* Lookup the extent index at which we have to stop */
	if (direction == SHIFT_RIGHT) {
		gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
		/* Make stop_extent exclusive of shift range */
		stop_extent--;
	} else
		stop_extent = total_extents;

	/*
	 * Look up the extent index for the fsb where we start shifting. We can
	 * henceforth iterate with current_ext as extent list changes are locked
	 * out via ilock.
	 *
	 * gotp can be null in 2 cases: 1) if there are no extents or 2)
	 * *next_fsb lies in a hole beyond which there are no extents. Either
	 * way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
	if (!gotp) {
		*done = 1;
		goto del_cursor;
	}

	/* some sanity checking before we finally start shifting extents */
	if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
	    (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
		error = -EIO;
		goto del_cursor;
	}

	while (nexts++ < num_exts) {
		error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
					   &current_ext, gotp, cur, &logflags,
					   direction, dfops);
		if (error)
			goto del_cursor;
		/*
		 * If there was an extent merge during the shift, the extent
		 * count can change. Update the total and grab the next record.
		 */
		if (direction == SHIFT_LEFT) {
			total_extents = xfs_iext_count(ifp);
			stop_extent = total_extents;
		}

		if (current_ext == stop_extent) {
			*done = 1;
			*next_fsb = NULLFSBLOCK;
			break;
		}
		gotp = xfs_iext_get_ext(ifp, current_ext);
	}

	if (!*done) {
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
	}

del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);

	return error;
}

/*
 * Split an extent into two extents at split_fsb block such that it is
 * the first block of the new extent.  @split_fsb is the block where the
 * extent is split.  If split_fsb lies in a hole or at the first block of
 * an extent, just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb,
	xfs_fsblock_t		*firstfsb,
	struct xfs_defer_ops	*dfops)
{
	int				whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	xfs_extnum_t			current_ext;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * gotp can be null in 2 cases: 1) if there are no extents
	 * or 2) split_fsb lies in a hole beyond which there are
	 * no extents. Either way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
	if (!gotp)
		return 0;

	xfs_bmbt_get_all(gotp, &got);

	/*
	 * Check split_fsb lies in a hole or the start boundary offset
	 * of the extent.
	 */
	if (got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstfsb;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	xfs_bmbt_set_blockcount(gotp, gotblkcnt);
	got.br_blockcount = gotblkcnt;

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				got.br_state);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	current_ext++;
	xfs_iext_insert(ip, current_ext, 1, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
				new.br_startblock, new.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		cur->bc_rec.b.br_state = new.br_state;

		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
				&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	xfs_defer_init(&dfops, &firstfsb);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
			&firstfsb, &dfops);
	if (error)
		goto out;

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
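
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a caller preparing an insert-range style operation would
 * first split any extent straddling the insertion boundary so the tail
 * of the file can then be shifted right cleanly.  The helper name below
 * is hypothetical; the real logic lives in the higher-level bmap
 * utility code.
 */
STATIC int
xfs_example_split_at_boundary(
	struct xfs_inode	*ip,
	xfs_off_t		offset)	/* byte offset of the boundary */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		split_fsb = XFS_B_TO_FSB(mp, offset);

	/*
	 * xfs_bmap_split_extent() is a no-op if split_fsb falls in a hole
	 * or at the start of an extent, so no special casing is needed.
	 */
	return xfs_bmap_split_extent(ip, split_fsb);
}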

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return  bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	int				error;
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(mp,
			XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	error = xfs_defer_join(dfops, bi->bi_owner);
	if (error) {
		kmem_free(bi);
		return error;
	}

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
			XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
			XFS_DATA_FORK, PREV);
}
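
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): how the two deferral helpers above could be combined to queue
 * a remap of one extent from a source inode into a destination inode, in
 * the style of the reflink code.  The helper name and the bare-bones
 * error handling are hypothetical.
 */
STATIC int
xfs_example_defer_remap(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*dst_ip,
	struct xfs_bmbt_irec	*irec)
{
	int			error;

	/* Queue an intent to unmap the extent from the source file... */
	error = xfs_bmap_unmap_extent(mp, dfops, src_ip, irec);
	if (error)
		return error;

	/* ...and another to map the same blocks into the destination. */
	return xfs_bmap_map_extent(mp, dfops, dst_ip, irec);
}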

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dfops,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state)
{
	int				error = 0, done;

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE,
			XFS_RANDOM_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, blockcount,
				startblock, dfops);
		break;
	case XFS_BMAP_UNMAP:
		error = xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1, &startblock, dfops, &done);
		ASSERT(done);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
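
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): processing a queued intent by unpacking its fields and handing
 * them to xfs_bmap_finish_one().  In the kernel this unpacking is done by
 * the BUI log-item code; the helper name here is hypothetical.
 */
STATIC int
xfs_example_finish_bmap_intent(
	struct xfs_trans	*tp,
	struct xfs_defer_ops	*dfops,
	struct xfs_bmap_intent	*bi)
{
	return xfs_bmap_finish_one(tp, dfops, bi->bi_owner, bi->bi_type,
			bi->bi_whichfork, bi->bi_bmap.br_startoff,
			bi->bi_bmap.br_startblock, bi->bi_bmap.br_blockcount,
			bi->bi_bmap.br_state);
}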