/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota_defs.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */
/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
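
/*
 * Illustrative walk-through of the loop above (the numbers here are
 * assumed for illustration only, not read from any particular
 * filesystem geometry): with maxleafents = MAXEXTNUM (2^31 - 1) and
 * minleafrecs = minnoderecs = 125, the rounded-up block counts go
 *
 *	maxblocks: 17179870 -> 137439 -> 1100 -> 9 -> 1
 *
 * so if maxrootrecs were >= 9 the loop would exit with level == 5 and
 * m_bm_maxlevels[XFS_DATA_FORK] would be set to 5.
 */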
/*
 * Lookup the record equal to [off, bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * Lookup the first record greater than or equal to [off, bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t	orig_len;

	mp = ip->i_mount;

	/* Calculate the worst-case size of the bmbt. */
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}

	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}
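
/*
 * Worked example for the bmbt loop above (all values assumed for
 * illustration): a delayed extent of len = 1000 blocks with
 * m_bmap_dmxr[0] = 125 needs ceil(1000/125) = 8 leaf blocks, so rval
 * is 8 after level 0; at level 1, ceil(8/125) = 1, so rval becomes 9
 * and the early-exit branch charges one more block for each remaining
 * level (e.g. 3 more when maxlevels is 5, for a worst case of 12).
 * The rmapbt term is only added when the reverse-mapping feature is
 * enabled on the filesystem.
 */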
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
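
/*
 * In other words: on the smallest (256 byte) inodes the attr fork is
 * pushed as far back as possible, leaving the data fork everything
 * except the minimal attr btree root space, while larger inodes use a
 * fixed offset sized for 6 * MINABTPTRS.  (Descriptive note only; the
 * exact byte values depend on XFS_LITINO() for the inode version.)
 */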
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	xfs_mount_t		*mp,	/* mount point structure */
	int			root,	/* flag for root block */
	short			sz)	/* root block size */
{
	int			i, j;	/* index into the extents list */
	int			dmxr;	/* maximum records */
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == xfs_iext_count(ifp));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else	/* DEBUG */
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)	do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif	/* DEBUG */
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
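
/*
 * Usage sketch (hedged: this only restates the pattern already used in
 * this file, e.g. by xfs_bmap_add_attrfork() below; it is not new API):
 *
 *	struct xfs_defer_ops	dfops;
 *	xfs_fsblock_t		firstblock;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	xfs_bmap_add_free(mp, &dfops, bno, len, &oinfo);
 *	error = xfs_defer_finish(&tp, &dfops, NULL);
 *
 * The extent is not returned to the free space btrees until the
 * deferred XFS_DEFER_OPS_TYPE_FREE work items are processed at
 * xfs_defer_finish() time.
 */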
/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
try_another_ag:
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		dfops->dop_low = true;
		goto try_another_ag;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
	       (dfops->dop_low &&
		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
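
/*
 * The result of a successful conversion is the smallest possible bmap
 * btree: a root in the inode fork with bb_level == 1 and bb_numrecs == 1,
 * pointing at a single child leaf that holds every non-delalloc incore
 * extent.  That is exactly the shape xfs_bmap_btree_to_extents() above
 * asserts before undoing the conversion.
 */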
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
try_another_ag:
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		goto try_another_ag;
	}
	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
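
/*
 * Note on init_fn: the only callout actually passed within this file is
 * xfs_symlink_local_to_remote (see xfs_bmap_add_attrfork_local() below),
 * which copies the inline symlink target into the freshly allocated
 * block and logs it; other callers supply their own formatter.
 */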
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formating, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
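
/*
 * The three xfs_bmap_add_attrfork_*() handlers above correspond to the
 * possible di_format values of the data fork (btree, extents, local);
 * xfs_bmap_add_attrfork() below dispatches to one of them after
 * computing the new fork offset.
 */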
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}
	ASSERT(ip->i_d.di_anextents == 0);

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	ASSERT(i == xfs_iext_count(ifp));
	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = xfs_iext_count(ifp);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}
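
/*
 * Illustrative trace (assumed extent list, not from real metadata):
 * with extents [startoff 0, 10 blocks] and [startoff 20, 10 blocks],
 * len = 5 and *first_unused = 0 on entry, the first extent merely
 * advances max to 10; for the second, off (20) >= lowest + len (5) and
 * off - max (10) >= len, so the hole at offset 10 is returned.
 */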
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	*last_block = 0;
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = xfs_iext_count(ifp);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocation or past the last extent, or at least into
	 * the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
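
/*
 * For example (values assumed for illustration): if the last extent is
 * [startoff 100, blockcount 10], an allocation at bma->offset 110 or
 * beyond sets aeof, and an allocation at offset 105 also sets aeof when
 * that last extent is still delalloc (isnullstartblock()), since the
 * new blocks will land at or past the current on-disk EOF.
 */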
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
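
	/*
	 * Example of the state decoding below (assumed values): if the
	 * new real extent exactly covers PREV and only the left neighbor
	 * is a contiguous real extent, state contains BMAP_LEFT_FILLING |
	 * BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG, and the corresponding
	 * case below merges PREV into the left extent and removes one
	 * incore extent record.
	 */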
1734 * Switch out based on the FILLING and CONTIG state bits.
1736 switch (state
& (BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
|
1737 BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
)) {
1738 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
|
1739 BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
1741 * Filling in all of a previously delayed allocation extent.
1742 * The left and right neighbors are both contiguous with new.
1745 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1746 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp
, bma
->idx
),
1747 LEFT
.br_blockcount
+ PREV
.br_blockcount
+
1748 RIGHT
.br_blockcount
);
1749 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1751 xfs_iext_remove(bma
->ip
, bma
->idx
+ 1, 2, state
);
1753 if (bma
->cur
== NULL
)
1754 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
1756 rval
= XFS_ILOG_CORE
;
1757 error
= xfs_bmbt_lookup_eq(bma
->cur
, RIGHT
.br_startoff
,
1758 RIGHT
.br_startblock
,
1759 RIGHT
.br_blockcount
, &i
);
1762 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1763 error
= xfs_btree_delete(bma
->cur
, &i
);
1766 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1767 error
= xfs_btree_decrement(bma
->cur
, 0, &i
);
1770 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1771 error
= xfs_bmbt_update(bma
->cur
, LEFT
.br_startoff
,
1773 LEFT
.br_blockcount
+
1774 PREV
.br_blockcount
+
1775 RIGHT
.br_blockcount
, LEFT
.br_state
);
1781 case BMAP_LEFT_FILLING
| BMAP_RIGHT_FILLING
| BMAP_LEFT_CONTIG
:
1783 * Filling in all of a previously delayed allocation extent.
1784 * The left neighbor is contiguous, the right is not.
1788 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1789 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp
, bma
->idx
),
1790 LEFT
.br_blockcount
+ PREV
.br_blockcount
);
1791 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1793 xfs_iext_remove(bma
->ip
, bma
->idx
+ 1, 1, state
);
1794 if (bma
->cur
== NULL
)
1795 rval
= XFS_ILOG_DEXT
;
1798 error
= xfs_bmbt_lookup_eq(bma
->cur
, LEFT
.br_startoff
,
1799 LEFT
.br_startblock
, LEFT
.br_blockcount
,
1803 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1804 error
= xfs_bmbt_update(bma
->cur
, LEFT
.br_startoff
,
1806 LEFT
.br_blockcount
+
1807 PREV
.br_blockcount
, LEFT
.br_state
);
1813 case BMAP_LEFT_FILLING
| BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
1815 * Filling in all of a previously delayed allocation extent.
1816 * The right neighbor is contiguous, the left is not.
1818 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1819 xfs_bmbt_set_startblock(ep
, new->br_startblock
);
1820 xfs_bmbt_set_blockcount(ep
,
1821 PREV
.br_blockcount
+ RIGHT
.br_blockcount
);
1822 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1824 xfs_iext_remove(bma
->ip
, bma
->idx
+ 1, 1, state
);
1825 if (bma
->cur
== NULL
)
1826 rval
= XFS_ILOG_DEXT
;
1829 error
= xfs_bmbt_lookup_eq(bma
->cur
, RIGHT
.br_startoff
,
1830 RIGHT
.br_startblock
,
1831 RIGHT
.br_blockcount
, &i
);
1834 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1835 error
= xfs_bmbt_update(bma
->cur
, PREV
.br_startoff
,
1837 PREV
.br_blockcount
+
1838 RIGHT
.br_blockcount
, PREV
.br_state
);
1844 case BMAP_LEFT_FILLING
| BMAP_RIGHT_FILLING
:
1846 * Filling in all of a previously delayed allocation extent.
1847 * Neither the left nor right neighbors are contiguous with
1850 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1851 xfs_bmbt_set_startblock(ep
, new->br_startblock
);
1852 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1855 if (bma
->cur
== NULL
)
1856 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
1858 rval
= XFS_ILOG_CORE
;
1859 error
= xfs_bmbt_lookup_eq(bma
->cur
, new->br_startoff
,
1860 new->br_startblock
, new->br_blockcount
,
1864 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 0, done
);
1865 bma
->cur
->bc_rec
.b
.br_state
= XFS_EXT_NORM
;
1866 error
= xfs_btree_insert(bma
->cur
, &i
);
1869 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1873 case BMAP_LEFT_FILLING
| BMAP_LEFT_CONTIG
:
1875 * Filling in the first part of a previous delayed allocation.
1876 * The left neighbor is contiguous.
1878 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
- 1, state
, _THIS_IP_
);
1879 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp
, bma
->idx
- 1),
1880 LEFT
.br_blockcount
+ new->br_blockcount
);
1881 xfs_bmbt_set_startoff(ep
,
1882 PREV
.br_startoff
+ new->br_blockcount
);
1883 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
- 1, state
, _THIS_IP_
);
1885 temp
= PREV
.br_blockcount
- new->br_blockcount
;
1886 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1887 xfs_bmbt_set_blockcount(ep
, temp
);
1888 if (bma
->cur
== NULL
)
1889 rval
= XFS_ILOG_DEXT
;
1892 error
= xfs_bmbt_lookup_eq(bma
->cur
, LEFT
.br_startoff
,
1893 LEFT
.br_startblock
, LEFT
.br_blockcount
,
1897 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1898 error
= xfs_bmbt_update(bma
->cur
, LEFT
.br_startoff
,
1900 LEFT
.br_blockcount
+
1906 da_new
= XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma
->ip
, temp
),
1907 startblockval(PREV
.br_startblock
));
1908 xfs_bmbt_set_startblock(ep
, nullstartblock(da_new
));
1909 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1914 case BMAP_LEFT_FILLING
:
1916 * Filling in the first part of a previous delayed allocation.
1917 * The left neighbor is not contiguous.
1919 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1920 xfs_bmbt_set_startoff(ep
, new_endoff
);
1921 temp
= PREV
.br_blockcount
- new->br_blockcount
;
1922 xfs_bmbt_set_blockcount(ep
, temp
);
1923 xfs_iext_insert(bma
->ip
, bma
->idx
, 1, new, state
);
1925 if (bma
->cur
== NULL
)
1926 rval
= XFS_ILOG_CORE
| XFS_ILOG_DEXT
;
1928 rval
= XFS_ILOG_CORE
;
1929 error
= xfs_bmbt_lookup_eq(bma
->cur
, new->br_startoff
,
1930 new->br_startblock
, new->br_blockcount
,
1934 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 0, done
);
1935 bma
->cur
->bc_rec
.b
.br_state
= XFS_EXT_NORM
;
1936 error
= xfs_btree_insert(bma
->cur
, &i
);
1939 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1942 if (xfs_bmap_needs_btree(bma
->ip
, whichfork
)) {
1943 error
= xfs_bmap_extents_to_btree(bma
->tp
, bma
->ip
,
1944 bma
->firstblock
, bma
->dfops
,
1945 &bma
->cur
, 1, &tmp_rval
, whichfork
);
1950 da_new
= XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma
->ip
, temp
),
1951 startblockval(PREV
.br_startblock
) -
1952 (bma
->cur
? bma
->cur
->bc_private
.b
.allocated
: 0));
1953 ep
= xfs_iext_get_ext(ifp
, bma
->idx
+ 1);
1954 xfs_bmbt_set_startblock(ep
, nullstartblock(da_new
));
1955 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
+ 1, state
, _THIS_IP_
);
1958 case BMAP_RIGHT_FILLING
| BMAP_RIGHT_CONTIG
:
1960 * Filling in the last part of a previous delayed allocation.
1961 * The right neighbor is contiguous with the new allocation.
1963 temp
= PREV
.br_blockcount
- new->br_blockcount
;
1964 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
+ 1, state
, _THIS_IP_
);
1965 xfs_bmbt_set_blockcount(ep
, temp
);
1966 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp
, bma
->idx
+ 1),
1967 new->br_startoff
, new->br_startblock
,
1968 new->br_blockcount
+ RIGHT
.br_blockcount
,
1970 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
+ 1, state
, _THIS_IP_
);
1971 if (bma
->cur
== NULL
)
1972 rval
= XFS_ILOG_DEXT
;
1975 error
= xfs_bmbt_lookup_eq(bma
->cur
, RIGHT
.br_startoff
,
1976 RIGHT
.br_startblock
,
1977 RIGHT
.br_blockcount
, &i
);
1980 XFS_WANT_CORRUPTED_GOTO(mp
, i
== 1, done
);
1981 error
= xfs_bmbt_update(bma
->cur
, new->br_startoff
,
1983 new->br_blockcount
+
1984 RIGHT
.br_blockcount
,
1990 da_new
= XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma
->ip
, temp
),
1991 startblockval(PREV
.br_startblock
));
1992 trace_xfs_bmap_pre_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
1993 xfs_bmbt_set_startblock(ep
, nullstartblock(da_new
));
1994 trace_xfs_bmap_post_update(bma
->ip
, bma
->idx
, state
, _THIS_IP_
);
	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur, 1,
				&tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		temp = new->br_startoff - PREV.br_startoff;
		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startblock = nullstartblock(
				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount = temp2;
		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops, &bma->cur,
					1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = xfs_bmap_worst_indlen(bma->ip, temp);
		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		if (diff > 0) {
			error = xfs_mod_fdblocks(bma->ip->i_mount,
						 -((int64_t)diff), false);
			ASSERT(!error);
			if (error)
				goto done;
		}

		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
			nullstartblock((int)temp2));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);

		bma->idx++;
		da_new = temp + temp2;
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur,
				da_old > 0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_old || da_new) {
		temp = da_new;
		if (bma->cur)
			temp += bma->cur->bc_private.b.allocated;
		ASSERT(temp <= da_old);
		if (temp < da_old)
			xfs_mod_fdblocks(bma->ip->i_mount,
					(int64_t)(da_old - temp), false);
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
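
/*
 * Illustrative sketch (not part of the original source): the case-0 path
 * above splits one delayed extent into three pieces.  Assuming plain
 * integer inputs, the two remaining delayed lengths are computed like
 * this; "split_delay_lengths" is a hypothetical name used only for
 * illustration.
 */
static inline void
split_delay_lengths(
	unsigned long long prev_off,	/* PREV.br_startoff */
	unsigned long long prev_len,	/* PREV.br_blockcount */
	unsigned long long new_off,	/* new->br_startoff */
	unsigned long long new_len,	/* new->br_blockcount */
	unsigned long long *left_len,	/* delayed piece before "new" */
	unsigned long long *right_len)	/* delayed piece after "new" */
{
	unsigned long long new_end = new_off + new_len;

	*left_len = new_off - prev_off;		    /* "temp" above */
	*right_len = prev_off + prev_len - new_end; /* "temp2" above */
}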
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_exntst_t		newext;	/* new extent state */
	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0; /* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	struct xfs_mount	*mp = tp->t_mountp;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	newext = new->br_state;
	oldext = (newext == XFS_EXT_UNWRITTEN) ?
		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
	ASSERT(PREV.br_state == oldext);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == newext &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < xfs_iext_count(&ip->i_df) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    newext == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 2, state);
		ip->i_d.di_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount,
				LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock, new->br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + new->br_blockcount,
				LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
		xfs_bmbt_set_startoff(ep, new_endoff);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_insert(ip, *idx, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock,
					PREV.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto done;
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, new, state);

		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			new->br_startoff - PREV.br_startoff);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = oldext;

		++*idx;
		xfs_iext_insert(ip, *idx, 2, &r[0], state);

		ip->i_d.di_nextents += 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/* new right extent - oldext */
			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
				r[1].br_startblock, r[1].br_blockcount,
				r[1].br_state)))
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			cur->bc_rec.b.br_blockcount =
				new->br_startoff - PREV.br_startoff;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			/* new middle extent - newext */
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* update reverse mappings */
	error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
				0, &tmp_logflags, XFS_DATA_FORK);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
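
/*
 * Illustrative sketch (not part of the original source): how the
 * FILLING flags that drive the switch above are derived from the
 * overlap between the new extent and PREV.  Hypothetical helper,
 * with the flag values inlined as example bit masks.
 */
static inline int
example_filling_state(
	unsigned long long prev_off,
	unsigned long long prev_len,
	unsigned long long new_off,
	unsigned long long new_len)
{
	int state = 0;

	if (prev_off == new_off)
		state |= 1;	/* plays the role of BMAP_LEFT_FILLING */
	if (prev_off + prev_len == new_off + new_len)
		state |= 2;	/* plays the role of BMAP_RIGHT_FILLING */
	return state;
}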
/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	state = 0;
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	default:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
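
/*
 * Illustrative sketch (not part of the original source): the fdblocks
 * adjustment above returns the difference between the indirect-block
 * reservations held before a merge (oldlen) and the worst case needed
 * by the merged extent (newlen).  Hypothetical helper for illustration.
 */
static inline long long
example_indlen_giveback(
	unsigned long long oldlen,	/* sum of old reservations */
	unsigned long long newlen)	/* worst-case need after merge */
{
	/* The caller asserts oldlen > newlen before giving blocks back. */
	return (long long)(oldlen - newlen);
}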
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */
	struct xfs_mount	*mp;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
	ASSERT(whichfork != XFS_COW_FORK);

	XFS_STATS_INC(mp, xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (bma->idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);

		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
					left.br_startblock, left.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state);
			if (error)
				goto done;
		}
		break;

	default:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = new->br_state;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	}

	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur,
				0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	bma->logflags |= rval;
	return error;
}
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
	 * the length back under MAXEXTLEN. The outer allocation loops handle
	 * short allocation just fine, so it is safe to do this. We only want to
	 * do it when we are forced to, though, because it means more allocation
	 * operations are required.
	 */
	while (align_alen > MAXEXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= MAXEXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see MAXEXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > MAXEXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
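
/*
 * Illustrative sketch (not part of the original source): the basic
 * rounding performed above, before any neighbour or realtime fixups.
 * The offset rounds down to an extsz boundary and the length grows to
 * cover the original request; hypothetical helper for illustration.
 */
static inline void
example_extsz_round(
	unsigned long long *off,	/* in/out: file offset */
	unsigned long long *len,	/* in/out: length */
	unsigned long long extsz)	/* alignment granularity */
{
	unsigned long long head = *off % extsz;	/* plays do_mod()'s role */
	unsigned long long tail;

	*off -= head;			/* move the start back... */
	*len += head;			/* ...and cover the gap */
	tail = *len % extsz;
	if (tail)
		*len += extsz - tail;	/* round the end up */
}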
#define XFS_ALLOC_GAP_UNITS	4

STATIC void
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)

	mp = ap->ip->i_mount;
	nullfb = *ap->firstblock == NULLFSBLOCK;
	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
		xfs_alloc_is_userdata(ap->datatype);
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
		    ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	else if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    ISVALID(prevbno, ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(prevbno + prevdiff,
				    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
				prevbno = NULLFSBLOCK;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
				gotbno = NULLFSBLOCK;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
		else if (prevbno != NULLFSBLOCK)
			ap->blkno = prevbno;
		else if (gotbno != NULLFSBLOCK)
			ap->blkno = gotbno;
	}
#undef ISVALID
}
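
/*
 * Illustrative sketch (not part of the original source): the
 * XFS_ALLOC_GAP_UNITS heuristic used above.  A neighbour's end is only
 * worth chasing when the hole between it and the request is small
 * relative to the allocation; hypothetical helper for illustration.
 */
static inline int
example_gap_worth_following(
	unsigned long long gap,		/* blocks between neighbour and us */
	unsigned long long alloc_len)	/* blocks being allocated */
{
	return gap <= 4 * alloc_len;	/* 4 == XFS_ALLOC_GAP_UNITS */
}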
static int
xfs_bmap_longest_free_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		ag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest;
	int			error = 0;

	pag = xfs_perag_get(mp, ag);
	if (!pag->pagf_init) {
		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
		if (error)
			goto out;

		if (!pag->pagf_init) {
			*notinit = 1;
			goto out;
		}
	}

	longest = xfs_alloc_longest_free_extent(mp, pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

out:
	xfs_perag_put(pag);
	return error;
}
static void
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen,
	int			notinit)
{
	if (notinit || *blen < ap->minlen) {
		/*
		 * Since we did a BUF_TRYLOCK above, it is possible that
		 * there is space for this request.
		 */
		args->minlen = ap->minlen;
	} else if (*blen < args->maxlen) {
		/*
		 * If the best seen length is less than the request length,
		 * use the best as the minimum.
		 */
		args->minlen = *blen;
	} else {
		/*
		 * Otherwise we've seen an extent as big as maxlen, use that
		 * as the minimum.
		 */
		args->minlen = args->maxlen;
	}
}
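
/*
 * Illustrative sketch (not part of the original source): the minlen
 * policy implemented above, reduced to plain integers.  Hypothetical
 * helper for illustration only.
 */
static inline unsigned int
example_select_minlen(
	unsigned int best_seen,	/* longest free extent found (*blen) */
	unsigned int want_min,	/* caller's minimum (ap->minlen) */
	unsigned int want_max,	/* caller's maximum (args->maxlen) */
	int notinit)		/* some AGF headers were unreadable */
{
	if (notinit || best_seen < want_min)
		return want_min;	/* the scan may have missed space */
	if (best_seen < want_max)
		return best_seen;	/* settle for the best we saw */
	return want_max;		/* plenty of room, ask for it all */
}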
STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;

	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	while (*blen < args->maxlen) {
		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return 0;
}
STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_NEAR_BNO;
	args->total = ap->total;

	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (ag == NULLAGNUMBER)
		ag = 0;

	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
	if (error)
		return error;

	if (*blen < args->maxlen) {
		error = xfs_filestream_new_ag(ap, &ag);
		if (error)
			return error;

		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
	return 0;
}
STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);

	mp = ap->ip->i_mount;

	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (xfs_alloc_is_userdata(ap->datatype))
		align = xfs_get_extsz_hint(ap->ip);
	if (unlikely(align)) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}

	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;

	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
	xfs_rmap_skip_owner_update(&args.oinfo);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
		 * enough for the request.  If one isn't found, then adjust
		 * the minimum allocation size to the largest space found.
		 */
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->dfops->dop_low) {
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (unlikely(align)) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->dfops->dop_low && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > stripe_align && blen <= args.maxlen)
				nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;
	if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
		args.ip = ap->ip;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = stripe_align;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		args.minleft = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->dfops->dop_low = true;
	}
	if (args.fsbno != NULLFSBLOCK) {
		/*
		 * check the allocation happened at the same or higher AG than
		 * the first block that was allocated.
		 */
		ASSERT(*ap->firstblock == NULLFSBLOCK ||
		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
		       XFS_FSB_TO_AGNO(mp, args.fsbno) ||
		       (ap->dfops->dop_low &&
			XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
			XFS_FSB_TO_AGNO(mp, args.fsbno)));

		ap->blkno = args.fsbno;
		if (*ap->firstblock == NULLFSBLOCK)
			*ap->firstblock = args.fsbno;
		ASSERT(nullfb || fb_agno == args.agno ||
		       (ap->dfops->dop_low && fb_agno < args.agno));
		ap->length = args.len;
		if (!(ap->flags & XFS_BMAPI_COWFORK))
			ap->ip->i_d.di_nblocks += args.len;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= args.len;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
					XFS_TRANS_DQ_BCOUNT,
			(long) args.len);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
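
/*
 * Illustrative sketch (not part of the original source): the
 * minlen/minalignslop dance above keeps minlen + alignment + slop from
 * growing between the exact-bno try and the aligned retry.
 * Hypothetical helper for illustration.
 */
static inline unsigned int
example_minalignslop(
	unsigned int blen,		/* best free extent seen */
	unsigned int minlen,		/* current args.minlen */
	unsigned int maxlen,		/* current args.maxlen */
	unsigned int stripe_align)	/* stripe unit or width */
{
	unsigned int nextminlen;

	if (blen > stripe_align && blen <= maxlen)
		nextminlen = blen - stripe_align;
	else
		nextminlen = minlen;
	if (nextminlen + stripe_align > minlen + 1)
		return nextminlen + stripe_align - minlen - 1;
	return 0;
}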
/*
 * For a remap operation, just "allocate" an extent at the address that the
 * caller passed in, and ensure that the AGFL is the right size. The caller
 * will then map the "allocated" extent into the file somewhere.
 */
STATIC int
xfs_bmap_remap_alloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_trans	*tp = ap->tp;
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_fsblock_t		bno;
	struct xfs_alloc_arg	args;
	int			error;

	/*
	 * validate that the block number is legal - the enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	memset(&args, 0, sizeof(struct xfs_alloc_arg));
	args.tp = ap->tp;
	args.mp = ap->tp->t_mountp;
	bno = *ap->firstblock;
	args.agno = XFS_FSB_TO_AGNO(mp, bno);
	args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
	if (args.agno >= mp->m_sb.sb_agcount ||
	    args.agbno >= mp->m_sb.sb_agblocks)
		return -EFSCORRUPTED;

	/* "Allocate" the extent from the range we passed in. */
	trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
	ap->blkno = bno;
	ap->ip->i_d.di_nblocks += ap->length;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);

	/* Fix the freelist, like a real allocator does. */
	args.datatype = ap->datatype;
	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	/*
	 * The freelist fixing code will decline the allocation if
	 * the size and shape of the free space doesn't allow for
	 * allocating the extent and updating all the metadata that
	 * happens during an allocation.  We're remapping, not
	 * allocating, so skip that check by pretending to be freeing.
	 */
	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	xfs_perag_put(args.pag);
	if (error)
		trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
	return error;
}
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int
xfs_bmap_alloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	if (ap->flags & XFS_BMAPI_REMAP)
		return xfs_bmap_remap_alloc(ap);
	if (XFS_IS_REALTIME_INODE(ap->ip) &&
	    xfs_alloc_is_userdata(ap->datatype))
		return xfs_bmap_rtalloc(ap);
	return xfs_bmap_btalloc(ap);
}
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
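
/*
 * Illustrative usage sketch (not part of the original source): trimming
 * a mapping that starts before the range of interest.  The literal
 * values are invented for the example.
 */
static inline void
example_trim_usage(void)
{
	struct xfs_bmbt_irec	irec;

	irec.br_startoff = 10;		/* mapping covers blocks 10..29 */
	irec.br_startblock = 1000;
	irec.br_blockcount = 20;
	irec.br_state = XFS_EXT_NORM;

	/* Keep only the part overlapping file blocks 15..24. */
	xfs_trim_extent(&irec, 15, 10);
	/* Now: br_startoff == 15, br_startblock == 1005, count == 10. */
}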
/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	int			flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
	return;
}
/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   ((flags & XFS_BMAPI_IGSTATE) ||
			mval[-1].br_state == mval->br_state)) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
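
/*
 * Illustrative sketch (not part of the original source): the adjacency
 * test used above when deciding whether a new mapping can be folded
 * into the previous array entry.  Hypothetical helper for illustration.
 */
static inline int
example_maps_mergeable(
	unsigned long long prev_off, unsigned long long prev_bno,
	unsigned long long prev_len,
	unsigned long long next_off, unsigned long long next_bno)
{
	/* contiguous in file offset and on disk */
	return next_off == prev_off + prev_len &&
	       next_bno == prev_bno + prev_len;
}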
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	xfs_extnum_t		idx;
	int			error;
	bool			eof = false;
	int			n = 0;
	int			whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* No CoW fork?  Return a hole. */
	if (whichfork == XFS_COW_FORK && !ifp) {
		mval->br_startoff = bno;
		mval->br_startblock = HOLESTARTBLOCK;
		mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole.  */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_get_extent(ifp, ++idx, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
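
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *	int			error;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, len_fsb, &map, &nmap, 0);
 *
 * Holes and ranges beyond EOF are reported as mappings with
 * br_startblock == HOLESTARTBLOCK rather than as errors, so callers
 * must check br_startblock before using the mapping.
 */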
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	xfs_extnum_t		*lastx,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	char			rt = XFS_IS_REALTIME_INODE(ip);
	xfs_extlen_t		extsz;
	int			error;
	xfs_fileoff_t		aoff = off;

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK)
		extsz = xfs_get_cowextsz_hint(ip);
	else
		extsz = xfs_get_extsz_hint(ip);
	if (extsz) {
		struct xfs_bmbt_irec	prev;

		if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	if (rt)
		extsz = alen / mp->m_sb.sb_rextsize;

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
			rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	if (rt) {
		error = xfs_mod_frextents(mp, -((int64_t)extsz));
	} else {
		error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	}

	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;

	ip->i_delayed_blks += alen;

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
	 * might have merged it into one of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	ASSERT(got->br_startoff <= aoff);
	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
	ASSERT(isnullstartblock(got->br_startblock));
	ASSERT(got->br_state == XFS_EXT_NORM);
	return 0;

out_unreserve_blocks:
	if (rt)
		xfs_mod_frextents(mp, extsz);
	else
		xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	return error;
}
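
/*
 * Note (editorial): the indlen blocks reserved above are the worst case
 * number of bmap btree blocks that could be needed if this delalloc
 * extent is later converted piecemeal; they are taken from fdblocks now,
 * recorded in the extent via nullstartblock(), and any unused portion is
 * given back when the extent is converted or freed.
 */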
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	ASSERT(bma->length > 0);

	/*
	 * For the wasdelay case, we could also just allocate the stuff asked
	 * for in this bmap call but that wouldn't be as good.
	 */
	if (bma->wasdel) {
		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
		bma->offset = bma->got.br_startoff;
		if (bma->idx) {
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
					 &bma->prev);
		}
	} else {
		bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
		if (!bma->eof)
			bma->length = XFS_FILBLKS_MIN(bma->length,
					bma->got.br_startoff - bma->offset);
	}

	/*
	 * Set the data type being allocated. For the data fork, the first data
	 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
	 */
	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK) {
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
			else
				bma->datatype |= XFS_ALLOC_USERDATA;
		}
		if (bma->flags & XFS_BMAPI_ZERO)
			bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
	}

	bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;

	/*
	 * Only want to do the alignment at the eof if it is userdata and
	 * allocation length is larger than a stripe unit.
	 */
	if (mp->m_dalign && bma->length >= mp->m_dalign &&
	    !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
		error = xfs_bmap_isaeof(bma, whichfork);
		if (error)
			return error;
	}

	error = xfs_bmap_alloc(bma);
	if (error)
		return error;

	if (bma->dfops->dop_low)
		bma->minleft = 0;
	if (bma->cur)
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
	if (bma->blkno == NULLFSBLOCK)
		return 0;
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur)
		bma->cur->bc_private.b.flags =
			bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	/*
	 * A wasdelay extent has been initialized, so shouldn't be flagged
	 * as unwritten.
	 */
	if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
	    xfs_sb_version_hasextflgbit(&mp->m_sb))
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma, whichfork);

	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}
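
/*
 * Note (editorial): on return bma->got describes the extent that now
 * covers bma->offset, which can be larger than the range requested if
 * the add_extent routines merged the allocation with a neighbour; this
 * is why the extent is re-read through xfs_iext_get_ext() above rather
 * than trusting the values that were passed in.
 */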
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	ASSERT(whichfork != XFS_COW_FORK);

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
			&bma->cur, mval, bma->firstblock, bma->dfops,
			&tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 */
	bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
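
/*
 * Note (editorial): the -EAGAIN return above is consumed by
 * xfs_bmapi_write(), which restarts its loop on the same file range so
 * that a conversion cut short by the mapping size generates another
 * request for the remainder.
 */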
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * The returned value in "firstblock" from the first call in a transaction
 * must be remembered and presented to subsequent calls in "firstblock".
 * An upper bound for the number of blocks to be allocated is supplied to
 * the first call in "total"; if no allocation group has that many free
 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_defer_ops	*dfops)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */
	char			inhole;		/* current location is hole in file */
	char			wasdelay;	/* old extent was delayed */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
	ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK);
	ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK);

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (*firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	n = 0;
	end = bno + len;
	obno = bno;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
		eof = true;
	if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.datatype = 0;
	bma.dfops = dfops;
	bma.firstblock = firstblock;

	while (bno < end && n < *nmap) {
		inhole = eof || bma.got.br_startoff > bno;
		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);

		/*
		 * Make sure we only reflink into a hole.
		 */
		if (flags & XFS_BMAPI_REMAP)
			ASSERT(inhole);
		if (flags & XFS_BMAPI_COWFORK)
			ASSERT(!inhole);

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (inhole || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			if (bma.blkno == NULLFSBLOCK)
				break;

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK) {
				error = xfs_refcount_alloc_cow_extent(mp, dfops,
						bma.blkno, bma.length);
				if (error)
					goto error0;
			}
		}

		/* Deal with the allocated space we found.  */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
			eof = true;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error.  Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock) ||
			       (dfops->dop_low &&
				XFS_FSB_TO_AGNO(mp, *firstblock) <
				XFS_FSB_TO_AGNO(mp,
					bma.cur->bc_private.b.firstblock)));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
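
/*
 * Usage sketch (illustrative, not from the original source; "resblks" is
 * the caller's block reservation):
 *
 *	struct xfs_bmbt_irec	imap;
 *	xfs_fsblock_t		first_block;
 *	struct xfs_defer_ops	dfops;
 *	int			nimaps = 1;
 *	int			error;
 *
 *	xfs_defer_init(&dfops, &first_block);
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *				XFS_BMAPI_PREALLOC, &first_block, resblks,
 *				&imap, &nimaps, &dfops);
 *
 * first_block must be carried unchanged into any further xfs_bmapi_write()
 * calls made in the same transaction, as the comment above requires.
 */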
/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents. If necessary, steal available
 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
 * ores == 1). The number of stolen blocks is returned. The availability and
 * subsequent accounting of stolen blocks is the responsibility of the caller.
 */
static xfs_filblks_t
xfs_bmap_split_indlen(
	xfs_filblks_t			ores,		/* original res. */
	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
	xfs_filblks_t			avail)		/* stealable blocks */
{
	xfs_filblks_t			len1 = *indlen1;
	xfs_filblks_t			len2 = *indlen2;
	xfs_filblks_t			nres = len1 + len2; /* new total res. */
	xfs_filblks_t			stolen = 0;

	/*
	 * Steal as many blocks as we can to try and satisfy the worst case
	 * indlen for both new extents.
	 */
	while (nres > ores && avail) {
		nres--;
		avail--;
		stolen++;
	}

	/*
	 * The only blocks available are those reserved for the original
	 * extent and what we can steal from the extent being removed.
	 * If this still isn't enough to satisfy the combined
	 * requirements for the two new extents, skim blocks off of each
	 * of the new reservations until they match what is available.
	 */
	while (nres > ores) {
		if (len1) {
			len1--;
			nres--;
		}
		if (nres == ores)
			break;
		if (len2) {
			len2--;
			nres--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;

	return stolen;
}
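
/*
 * Worked example (illustrative, not from the original source):
 * ores = 5, *indlen1 = *indlen2 = 4, avail = 2.  nres starts at 8; the
 * first loop steals both available blocks (nres = 6, stolen = 2), then
 * the second loop skims one block from indlen1, at which point
 * nres == ores and it stops.  The result is *indlen1 = 3, *indlen2 = 4,
 * and 2 stolen blocks reported to the caller.
 */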
int
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*idx,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen;
	int			error = 0, state = 0;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	if (isrt) {
		int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);

		do_div(rtexts, mp->m_sb.sb_rextsize);
		xfs_mod_frextents(mp, rtexts);
	}

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip,
			-((long)del->br_blockcount), 0,
			isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;
	ip->i_delayed_blks -= del->br_blockcount;

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_CONTIG;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_CONTIG;

	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1, state);
		--*idx;
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * Deleting the last part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two new
		 * extents.  Steal blocks from the deleted extent if necessary.
		 * Stealing blocks simply fudges the fdblocks accounting below.
		 * Warn if either of the new indlen reservations is zero as this
		 * can lead to delalloc problems.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);

		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);
		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
						       del->br_blockcount);

		got->br_startblock = nullstartblock((int)got_indlen);
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, &new, state);

		da_new = got_indlen + new_indlen - stolen;
		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	if (!isrt)
		da_diff += del->br_blockcount;
	if (da_diff)
		xfs_mod_fdblocks(mp, da_diff, false);
	return error;
}
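
/*
 * Note (editorial): when the middle of a delalloc extent is removed, the
 * freed data blocks plus whatever indlen reservation is no longer needed
 * go back to fdblocks above, less the blocks "stolen" to fund the indlen
 * reservations of the two surviving pieces.
 */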
void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	xfs_extnum_t		*idx,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	int			state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_CONTIG;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_CONTIG;

	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1, state);
		--*idx;
		break;
	case BMAP_LEFT_CONTIG:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_CONTIG:
		/*
		 * Deleting the last part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount -= del->br_blockcount;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		got->br_blockcount = del->br_startoff - got->br_startoff;
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		++*idx;
		xfs_iext_insert(ip, *idx, 1, &new, state);
		break;
	}
}
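
/*
 * Note (editorial): compared with xfs_bmap_del_extent_delay() above,
 * this CoW fork variant only edits the incore extent list; CoW fork
 * extents are not tracked in the on-disk bmap btree, and the removed
 * range here is real (not delalloc), so no fdblocks or quota adjustments
 * appear to be required at this point.
 */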
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	int			bflags)	/* bmapi flags */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
	ASSERT(del->br_blockcount > 0);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	delay = isnullstartblock(got.br_startblock);
	ASSERT(isnullstartblock(del->br_startblock) == delay);
	flags = 0;
	qfield = 0;
	error = 0;
	/*
	 * If deleting a real allocation, must free up the disk space.
	 */
	if (!delay) {
		flags = XFS_ILOG_CORE;
		/*
		 * Realtime allocation.  Free it and record di_nblocks update.
		 */
		if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
			xfs_fsblock_t	bno;
			xfs_filblks_t	len;

			ASSERT(do_mod(del->br_blockcount,
				      mp->m_sb.sb_rextsize) == 0);
			ASSERT(do_mod(del->br_startblock,
				      mp->m_sb.sb_rextsize) == 0);
			bno = del->br_startblock;
			len = del->br_blockcount;
			do_div(bno, mp->m_sb.sb_rextsize);
			do_div(len, mp->m_sb.sb_rextsize);
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
			do_fx = 0;
			nblks = len * mp->m_sb.sb_rextsize;
			qfield = XFS_TRANS_DQ_RTBCOUNT;
		}
		/*
		 * Ordinary allocation.
		 */
		else {
			do_fx = 1;
			nblks = del->br_blockcount;
			qfield = XFS_TRANS_DQ_BCOUNT;
		}
		/*
		 * Set up del_endblock and cur for later.
		 */
		del_endblock = del->br_startblock + del->br_blockcount;
		if (cur) {
			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
					got.br_startblock, got.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		da_old = da_new = 0;
	} else {
		da_old = startblockval(got.br_startblock);
		da_new = 0;
		nblks = 0;
		do_fx = 0;
	}

	/*
	 * Set flag value to use in switch statement.
	 * Left-contig is 2, right-contig is 1.
	 */
	switch (((got.br_startoff == del->br_startoff) << 1) |
		(got_endoff == del_endoff)) {
	case 3:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx, 1,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
		--*idx;
		if (delay)
			break;

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 2:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, del_endoff);
		temp = got.br_blockcount - del->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		xfs_bmbt_set_startblock(ep, del_endblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 1:
		/*
		 * Deleting the last part of the extent.
		 */
		temp = got.br_blockcount - del->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		temp = del->br_startoff - got.br_startoff;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		new.br_startoff = del_endoff;
		temp2 = got_endoff - del_endoff;
		new.br_blockcount = temp2;
		new.br_state = got.br_state;
		if (!delay) {
			new.br_startblock = del_endblock;
			flags |= XFS_ILOG_CORE;
			if (cur) {
				if ((error = xfs_bmbt_update(cur,
						got.br_startoff,
						got.br_startblock, temp,
						got.br_state)))
					goto done;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto done;
				cur->bc_rec.b = new;
				error = xfs_btree_insert(cur, &i);
				if (error && error != -ENOSPC)
					goto done;
				/*
				 * If get no-space back from btree insert,
				 * it tried a split, and we have a zero
				 * block reservation.
				 * Fix up our state and return the error.
				 */
				if (error == -ENOSPC) {
					/*
					 * Reset the cursor, don't trust
					 * it after any insert operation.
					 */
					if ((error = xfs_bmbt_lookup_eq(cur,
							got.br_startoff,
							got.br_startblock,
							temp, &i)))
						goto done;
					XFS_WANT_CORRUPTED_GOTO(mp,
								i == 1, done);
					/*
					 * Update the btree record back
					 * to the original value.
					 */
					if ((error = xfs_bmbt_update(cur,
							got.br_startoff,
							got.br_startblock,
							got.br_blockcount,
							got.br_state)))
						goto done;
					/*
					 * Reset the extent record back
					 * to the original value.
					 */
					xfs_bmbt_set_blockcount(ep,
						got.br_blockcount);
					flags = 0;
					error = -ENOSPC;
					goto done;
				}
				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			} else
				flags |= xfs_ilog_fext(whichfork);
			XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		} else {
			xfs_filblks_t	stolen;
			ASSERT(whichfork == XFS_DATA_FORK);

			/*
			 * Distribute the original indlen reservation across the
			 * two new extents. Steal blocks from the deleted extent
			 * if necessary. Stealing blocks simply fudges the
			 * fdblocks accounting in xfs_bunmapi().
			 */
			temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
			temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
			stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
						       del->br_blockcount);
			da_new = temp + temp2 - stolen;
			del->br_blockcount -= stolen;

			/*
			 * Set the reservation for each extent. Warn if either
			 * is zero as this can lead to delalloc problems.
			 */
			WARN_ON_ONCE(!temp || !temp2);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			new.br_startblock = nullstartblock((int)temp2);
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_insert(ip, *idx + 1, 1, &new, state);
		++*idx;
		break;
	}

	/* remove reverse mapping */
	if (!delay) {
		error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
		if (error)
			goto done;
	}

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			error = xfs_refcount_decrease_extent(mp, dfops, del);
			if (error)
				goto done;
		} else
			xfs_bmap_add_free(mp, dfops, del->br_startblock,
					del->br_blockcount, NULL);
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_d.di_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	/*
	 * Account for change in delayed indirect blocks.
	 * Nothing to do for disk quota accounting here.
	 */
	ASSERT(da_old >= da_new);
	if (da_old > da_new)
		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
	*logflagsp = flags;
	return error;
}
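
/*
 * Note (editorial): the delayed-allocation "middle of the extent" branch
 * above mirrors xfs_bmap_del_extent_delay(): both use
 * xfs_bmap_split_indlen() to spread da_old over the two surviving pieces
 * and shrink del->br_blockcount by the stolen blocks so that the caller's
 * fdblocks accounting stays balanced.
 */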
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *rlen is left non-zero to report the remainder.
 */
int						/* error */
__xfs_bunmapi(
	xfs_trans_t		*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting offset to unmap */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts,		/* number of extents max */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	struct xfs_defer_ops	*dfops)		/* i/o: deferred updates */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_irec_t		del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	xfs_bmbt_irec_t		got;		/* current extent record */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	xfs_extnum_t		lastx;		/* last extent index used */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_fileoff_t		start;		/* first file offset deleted */
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */

	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (unlikely(
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return -EFSCORRUPTED;
	}
	mp = ip->i_mount;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(len > 0);
	ASSERT(nexts >= 0);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	start = bno;
	bno = start + len - 1;

	/*
	 * Check to see if the given block number is past the end of the
	 * file, back up to the last block if so...
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
		ASSERT(lastx > 0);
		xfs_iext_get_extent(ifp, --lastx, &got);
		bno = got.br_startoff + got.br_blockcount - 1;
	}

	logflags = 0;
	if (ifp->if_flags & XFS_IFBROOT) {
		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	extno = 0;
	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which bno lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > bno) {
			if (--lastx < 0)
				break;
			xfs_iext_get_extent(ifp, lastx, &got);
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		bno = XFS_FILEOFF_MIN(bno,
			got.br_startoff + got.br_blockcount - 1);
		if (bno < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);
		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > bno + 1)
			del.br_blockcount = bno + 1 - del.br_startoff;
		sum = del.br_startblock + del.br_blockcount;
		if (isrt &&
		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN ||
			    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT(bno >= mod);
				bno -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (bno < got.br_startoff) {
					if (--lastx >= 0)
						xfs_bmbt_get_all(xfs_iext_get_ext(
							ifp, lastx), &got);
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					&lastx, &cur, &del, firstblock, dfops,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			mod = mp->m_sb.sb_rextsize - mod;
			if (del.br_blockcount > mod) {
				del.br_blockcount -= mod;
				del.br_startoff += mod;
				del.br_startblock += mod;
			} else if ((del.br_startoff == start &&
				    (del.br_state == XFS_EXT_UNWRITTEN ||
				     tp->t_blk_res == 0)) ||
				   !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(bno >= del.br_blockcount);
				bno -= del.br_blockcount;
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				ASSERT(lastx > 0);
				xfs_iext_get_extent(ifp, lastx - 1, &prev);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				if (prev.br_startoff < start) {
					mod = start - prev.br_startoff;
					prev.br_blockcount -= mod;
					prev.br_startblock += mod;
					prev.br_startoff = start;
				}
				prev.br_state = XFS_EXT_UNWRITTEN;
				lastx--;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, &lastx, &cur, &prev,
						firstblock, dfops, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, &lastx, &cur, &del,
						firstblock, dfops, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

		/*
		 * If it's the case where the directory code is running
		 * with no block reservation, and the deleted block is in
		 * the middle of its extent, and the resulting insert
		 * of an extent would cause transformation to btree format,
		 * then reject it.  The calling code will then swap
		 * blocks around instead.
		 * We have to do this now, rather than waiting for the
		 * conversion to btree format, since the transaction
		 * will be dirty.
		 */
		if (!wasdel && tp->t_blk_res == 0 &&
		    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		    XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
			XFS_IFORK_MAXEXT(ip, whichfork) &&
		    del.br_startoff > got.br_startoff &&
		    del.br_startoff + del.br_blockcount <
		    got.br_startoff + got.br_blockcount) {
			error = -ENOSPC;
			goto error0;
		}

		/*
		 * Unreserve quota and update realtime free space, if
		 * appropriate. If delayed allocation, update the inode delalloc
		 * counter now and wait to update the sb counters as
		 * xfs_bmap_del_extent() might need to borrow some blocks.
		 */
		if (wasdel) {
			ASSERT(startblockval(del.br_startblock) > 0);
			if (isrt) {
				xfs_filblks_t rtexts;

				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
				do_div(rtexts, mp->m_sb.sb_rextsize);
				xfs_mod_frextents(mp, (int64_t)rtexts);
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_RTBLKS);
			} else {
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_REGBLKS);
			}
			ip->i_delayed_blks -= del.br_blockcount;
			if (cur)
				cur->bc_private.b.flags |=
					XFS_BTCUR_BPRV_WASDEL;
		} else if (cur)
			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;

		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
				&tmp_logflags, whichfork, flags);
		logflags |= tmp_logflags;
		if (error)
			goto error0;

		if (!isrt && wasdel)
			xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);

		bno = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
			if (lastx >= 0) {
				xfs_iext_get_extent(ifp, lastx, &got);
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
			}
			extno++;
		}
	}
	if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
		*rlen = 0;
	else
		*rlen = bno - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
			&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from btree to extents, give it cur
	 */
	else if (xfs_bmap_wants_extents(ip, whichfork)) {
		ASSERT(cur != NULL);
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
			whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from extents to local?
	 */
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error) {
			*firstblock = cur->bc_private.b.firstblock;
			cur->bc_private.b.allocated = 0;
		}
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
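
/*
 * Note (editorial): on success *rlen holds the number of blocks still
 * unmapped (zero when the entire range was processed), so callers invoke
 * __xfs_bunmapi() in a loop until it reaches zero, rolling transactions
 * in between as their block reservations require.
 */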
/* Unmap a range of a file. */
int
xfs_bunmapi(
	xfs_trans_t		*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_extnum_t		nexts,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
			dfops);
	*done = (len == 0);
	return error;
}
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
		return false;

	return true;
}
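
/*
 * Worked example (illustrative, not from the original source): shifting
 * {startoff 100, startblock 500, blockcount 10} left by 20 gives a new
 * startoff of 80; a left neighbour {startoff 70, startblock 490,
 * blockcount 10} in the same state ends exactly at file block 80 and
 * disk block 500, so the shift can instead be performed as a merge into
 * a single {startoff 70, startblock 490, blockcount 20} extent.
 */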
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	int				current_ext,	/* idx of gotp */
	struct xfs_bmbt_rec_host	*gotp,		/* extent to shift */
	struct xfs_bmbt_rec_host	*leftp,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		left;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	xfs_bmbt_get_all(gotp, &got);
	xfs_bmbt_get_all(leftp, &left);
	blockcount = left.br_blockcount + got.br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(&left, &got, shift));

	/*
	 * Merge the in-core extents. Note that the host record pointers and
	 * current_ext index are invalid once the extent has been removed via
	 * xfs_iext_remove().
	 */
	xfs_bmbt_set_blockcount(leftp, blockcount);
	xfs_iext_remove(ip, current_ext, 1, 0);

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		return 0;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
				   left.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	left.br_blockcount = blockcount;

	return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
			       left.br_blockcount, left.br_state);
}
/*
 * Shift a single extent.
 */
STATIC int
xfs_bmse_shift_one(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			offset_shift_fsb,
	int				*current_ext,
	struct xfs_bmbt_rec_host	*gotp,
	struct xfs_btree_cur		*cur,
	int				*logflags,
	enum shift_direction		direction,
	struct xfs_defer_ops		*dfops)
{
	struct xfs_ifork		*ifp;
	struct xfs_mount		*mp;
	xfs_fileoff_t			startoff;
	struct xfs_bmbt_rec_host	*adj_irecp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		adj_irec;
	int				error;
	int				i;
	int				total_extents;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	total_extents = xfs_iext_count(ifp);

	xfs_bmbt_get_all(gotp, &got);

	/* delalloc extents should be prevented by caller */
	XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

	if (direction == SHIFT_LEFT) {
		startoff = got.br_startoff - offset_shift_fsb;

		/*
		 * Check for merge if we've got an extent to the left,
		 * otherwise make sure there's enough room at the start
		 * of the file for the shift.
		 */
		if (!*current_ext) {
			if (got.br_startoff < offset_shift_fsb)
				return -EINVAL;
			goto update_current_ext;
		}
		/*
		 * grab the left extent and check for a large
		 * enough hole.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);

		if (startoff <
		    adj_irec.br_startoff + adj_irec.br_blockcount)
			return -EINVAL;

		/* check whether to merge the extent or shift it down */
		if (xfs_bmse_can_merge(&adj_irec, &got,
				       offset_shift_fsb)) {
			error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
					       *current_ext, gotp, adj_irecp,
					       cur, logflags);
			if (error)
				return error;
			adj_irec = got;
			goto update_rmap;
		}
	} else {
		startoff = got.br_startoff + offset_shift_fsb;
		/* nothing to move if this is the last extent */
		if (*current_ext >= (total_extents - 1))
			goto update_current_ext;
		/*
		 * If this is not the last extent in the file, make sure there
		 * is enough room between current extent and next extent for
		 * accommodating the shift.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);
		if (startoff + got.br_blockcount > adj_irec.br_startoff)
			return -EINVAL;
		/*
		 * Unlike a left shift (which involves a hole punch),
		 * a right shift does not modify extent neighbors
		 * in any way. We should never find mergeable extents
		 * in this scenario. Check anyways and warn if we
		 * encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}
	/*
	 * Increment the extent index for the next iteration, update the start
	 * offset of the in-core extent and update the btree if applicable.
	 */
update_current_ext:
	if (direction == SHIFT_LEFT)
		(*current_ext)++;
	else
		(*current_ext)--;
	xfs_bmbt_set_startoff(gotp, startoff);
	*logflags |= XFS_ILOG_CORE;
	adj_irec = got;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto update_rmap;
	}

	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	got.br_startoff = startoff;
	error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
			got.br_blockcount, got.br_state);
	if (error)
		return error;

update_rmap:
	/* update reverse mapping */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
	if (error)
		return error;
	adj_irec.br_startoff = startoff;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
/*
 * Shift extent records to the left/right to cover/create a hole.
 *
 * The maximum number of extents to be shifted in a single operation is
 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
 * is the length by which each extent is shifted. If there is no hole to shift
 * the extents into, this will be considered an invalid operation and we abort
 * immediately.
 */
int
xfs_bmap_shift_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	int			*done,
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	enum shift_direction	direction,
	int			num_exts)
{
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_extnum_t			nexts = 0;
	xfs_extnum_t			current_ext;
	xfs_extnum_t			total_extents;
	xfs_extnum_t			stop_extent;
	int				error = 0;
	int				whichfork = XFS_DATA_FORK;
	int				logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_shift_extents",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
	ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	/*
	 * There may be delalloc extents in the data fork before the range we
	 * are collapsing out, so we cannot use the count of real extents here.
	 * Instead we have to calculate it from the incore fork.
	 */
	total_extents = xfs_iext_count(ifp);
	if (total_extents == 0) {
		*done = 1;
		goto del_cursor;
	}

	/*
	 * In case of first right shift, we need to initialize next_fsb
	 */
	if (*next_fsb == NULLFSBLOCK) {
		gotp = xfs_iext_get_ext(ifp, total_extents - 1);
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
		if (stop_fsb > *next_fsb) {
			*done = 1;
			goto del_cursor;
		}
	}

	/* Lookup the extent index at which we have to stop */
	if (direction == SHIFT_RIGHT) {
		gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
		/* Make stop_extent exclusive of shift range */
		stop_extent--;
	} else
		stop_extent = total_extents;

	/*
	 * Look up the extent index for the fsb where we start shifting. We can
	 * henceforth iterate with current_ext as extent list changes are locked
	 * out via ilock.
	 *
	 * gotp can be null in 2 cases: 1) if there are no extents or 2)
	 * *next_fsb lies in a hole beyond which there are no extents. Either
	 * way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
	if (!gotp) {
		*done = 1;
		goto del_cursor;
	}

	/* some sanity checking before we finally start shifting extents */
	if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
	     (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
		error = -EIO;
		goto del_cursor;
	}

	while (nexts++ < num_exts) {
		error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
					   &current_ext, gotp, cur, &logflags,
					   direction, dfops);
		if (error)
			goto del_cursor;
		/*
		 * If there was an extent merge during the shift, the extent
		 * count can change. Update the total and grab the next record.
		 */
		if (direction == SHIFT_LEFT) {
			total_extents = xfs_iext_count(ifp);
			stop_extent = total_extents;
		}

		if (current_ext == stop_extent) {
			*done = 1;
			*next_fsb = NULLFSBLOCK;
			break;
		}
		gotp = xfs_iext_get_ext(ifp, current_ext);
	}

	if (!*done) {
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
	}

del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);

	return error;
}
6151 * Splits an extent into two extents at split_fsb block such that it is
6152 * the first block of the current_ext. @current_ext is a target extent
6153 * to be split. @split_fsb is a block where the extents is split.
6154 * If split_fsb lies in a hole or the first block of extents, just return 0.
6157 xfs_bmap_split_extent_at(
6158 struct xfs_trans
*tp
,
6159 struct xfs_inode
*ip
,
6160 xfs_fileoff_t split_fsb
,
6161 xfs_fsblock_t
*firstfsb
,
6162 struct xfs_defer_ops
*dfops
)
	int				whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	xfs_extnum_t			current_ext;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * gotp can be null in 2 cases: 1) if there are no extents
	 * or 2) split_fsb lies in a hole beyond which there are
	 * no extents. Either way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
	if (!gotp)
		return 0;

	xfs_bmbt_get_all(gotp, &got);

	/*
	 * Check split_fsb lies in a hole or the start boundary offset
	 * of the extent.
	 */
	if (got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;
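
	/*
	 * For example (values invented for illustration): splitting
	 * got = [br_startoff 0, br_startblock 100, br_blockcount 10]
	 * at split_fsb 4 gives gotblkcnt = 4, shrinks got to 4 blocks,
	 * and creates new = [br_startoff 4, br_startblock 104,
	 * br_blockcount 6], so the two pieces together still map
	 * exactly the same disk blocks as the original extent.
	 */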

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstfsb;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	xfs_bmbt_set_blockcount(gotp, gotblkcnt);
	got.br_blockcount = gotblkcnt;

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock, got.br_blockcount,
				got.br_state);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	current_ext++;
	xfs_iext_insert(ip, current_ext, 1, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
				new.br_startblock, new.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		cur->bc_rec.b.br_state = new.br_state;

		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
				&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	xfs_defer_init(&dfops, &firstfsb);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
			&firstfsb, &dfops);
	if (error)
		goto out;

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
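
/*
 * Sketch of the expected use (the real caller is the kernel's
 * insert-range path in xfs_bmap_util.c, not this file): split any
 * extent straddling the insertion offset so the subsequent right
 * shift starts on an extent boundary:
 *
 *	error = xfs_bmap_split_extent(ip, stop_fsb);
 *
 * "stop_fsb" is an illustrative name for the file offset (in
 * filesystem blocks) where the new space will be inserted.
 */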

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return  bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}
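
/*
 * Note: holes carry HOLESTARTBLOCK and delalloc reservations carry
 * DELAYSTARTBLOCK in br_startblock; neither refers to real allocated
 * blocks, so there is nothing on disk for a deferred map/unmap to
 * update.
 */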

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	int				error;
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(mp,
			XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	error = xfs_defer_join(dfops, bi->bi_owner);
	if (error) {
		kmem_free(bi);
		return error;
	}

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}
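
/*
 * Note: ownership of @bi passes to the deferred-ops machinery above;
 * the intent is freed once it has been processed or cancelled (in the
 * kernel, by the XFS_DEFER_OPS_TYPE_BMAP handlers), not by the caller.
 * KM_NOFS is used for the allocation because we may hold a transaction
 * here, where recursing into filesystem reclaim could deadlock.
 */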

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
			XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
			XFS_DATA_FORK, PREV);
}
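
/*
 * Sketch of the intended calling pattern (the real users are the
 * kernel's reflink remap paths, not this file): queue an unmap of the
 * old mapping and a map of the new one in the same deferred-ops chain
 * so both are applied, with intents logged, across transaction rolls:
 *
 *	error = xfs_bmap_unmap_extent(mp, &dfops, ip, &irec);
 *	if (!error)
 *		error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
 *
 * "irec" and "uirec" are illustrative xfs_bmbt_irec variables, not
 * names from this file.
 */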

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dfops,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state)
{
	struct xfs_bmbt_irec		bmap;
	int				nimaps = 1;
	xfs_fsblock_t			firstfsb;
	int				flags = XFS_BMAPI_REMAP;
	int				done;
	int				error = 0;

	bmap.br_startblock = startblock;
	bmap.br_startoff = startoff;
	bmap.br_blockcount = blockcount;
	bmap.br_state = state;

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, blockcount, state);

	if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
		return -EFSCORRUPTED;
	if (whichfork == XFS_ATTR_FORK)
		flags |= XFS_BMAPI_ATTRFORK;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE,
			XFS_RANDOM_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		firstfsb = bmap.br_startblock;
		error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
					bmap.br_blockcount, flags, &firstfsb,
					bmap.br_blockcount, &bmap, &nimaps,
					dfops);
		break;
	case XFS_BMAP_UNMAP:
		error = xfs_bunmapi(tp, ip, bmap.br_startoff,
				bmap.br_blockcount, flags, 1, &firstfsb,
				dfops, &done);
		ASSERT(done);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}