1 | /* | |
2 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it would be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
17 | */ | |
18 | #include "libxfs_priv.h" | |
19 | #include "xfs_fs.h" | |
20 | #include "xfs_shared.h" | |
21 | #include "xfs_format.h" | |
22 | #include "xfs_log_format.h" | |
23 | #include "xfs_trans_resv.h" | |
24 | #include "xfs_bit.h" | |
25 | #include "xfs_sb.h" | |
26 | #include "xfs_mount.h" | |
27 | #include "xfs_defer.h" | |
28 | #include "xfs_da_format.h" | |
29 | #include "xfs_da_btree.h" | |
30 | #include "xfs_dir2.h" | |
31 | #include "xfs_inode.h" | |
32 | #include "xfs_btree.h" | |
33 | #include "xfs_trans.h" | |
34 | #include "xfs_alloc.h" | |
35 | #include "xfs_bmap.h" | |
36 | #include "xfs_bmap_btree.h" | |
37 | #include "xfs_trans_space.h" | |
38 | #include "xfs_trace.h" | |
39 | #include "xfs_attr_leaf.h" | |
40 | #include "xfs_quota_defs.h" | |
41 | #include "xfs_rmap.h" | |
42 | #include "xfs_ag_resv.h" | |
43 | #include "xfs_refcount.h" | |
44 | #include "xfs_rmap_btree.h" | |
45 | ||
46 | ||
/* Slab cache for the extent-free work items queued by xfs_bmap_add_free(). */
kmem_zone_t		*xfs_bmap_free_item_zone;
48 | ||
49 | /* | |
50 | * Miscellaneous helper functions | |
51 | */ | |
52 | ||
53 | /* | |
54 | * Compute and fill in the value of the maximum depth of a bmap btree | |
55 | * in this filesystem. Done once, during mount. | |
56 | */ | |
57 | void | |
58 | xfs_bmap_compute_maxlevels( | |
59 | xfs_mount_t *mp, /* file system mount structure */ | |
60 | int whichfork) /* data or attr fork */ | |
61 | { | |
62 | int level; /* btree level */ | |
63 | uint maxblocks; /* max blocks at this level */ | |
64 | uint maxleafents; /* max leaf entries possible */ | |
65 | int maxrootrecs; /* max records in root block */ | |
66 | int minleafrecs; /* min records in leaf block */ | |
67 | int minnoderecs; /* min records in node block */ | |
68 | int sz; /* root block size */ | |
69 | ||
70 | /* | |
71 | * The maximum number of extents in a file, hence the maximum | |
72 | * number of leaf entries, is controlled by the type of di_nextents | |
73 | * (a signed 32-bit number, xfs_extnum_t), or by di_anextents | |
74 | * (a signed 16-bit number, xfs_aextnum_t). | |
75 | * | |
76 | * Note that we can no longer assume that if we are in ATTR1 that | |
77 | * the fork offset of all the inodes will be | |
78 | * (xfs_default_attroffset(ip) >> 3) because we could have mounted | |
79 | * with ATTR2 and then mounted back with ATTR1, keeping the | |
80 | * di_forkoff's fixed but probably at various positions. Therefore, | |
81 | * for both ATTR1 and ATTR2 we have to assume the worst case scenario | |
82 | * of a minimum size available. | |
83 | */ | |
84 | if (whichfork == XFS_DATA_FORK) { | |
85 | maxleafents = MAXEXTNUM; | |
86 | sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); | |
87 | } else { | |
88 | maxleafents = MAXAEXTNUM; | |
89 | sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); | |
90 | } | |
91 | maxrootrecs = xfs_bmdr_maxrecs(sz, 0); | |
92 | minleafrecs = mp->m_bmap_dmnr[0]; | |
93 | minnoderecs = mp->m_bmap_dmnr[1]; | |
94 | maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; | |
95 | for (level = 1; maxblocks > 1; level++) { | |
96 | if (maxblocks <= maxrootrecs) | |
97 | maxblocks = 1; | |
98 | else | |
99 | maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; | |
100 | } | |
101 | mp->m_bm_maxlevels[whichfork] = level; | |
102 | } | |
103 | ||
104 | STATIC int /* error */ | |
105 | xfs_bmbt_lookup_eq( | |
106 | struct xfs_btree_cur *cur, | |
107 | xfs_fileoff_t off, | |
108 | xfs_fsblock_t bno, | |
109 | xfs_filblks_t len, | |
110 | int *stat) /* success/failure */ | |
111 | { | |
112 | cur->bc_rec.b.br_startoff = off; | |
113 | cur->bc_rec.b.br_startblock = bno; | |
114 | cur->bc_rec.b.br_blockcount = len; | |
115 | return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); | |
116 | } | |
117 | ||
118 | STATIC int /* error */ | |
119 | xfs_bmbt_lookup_ge( | |
120 | struct xfs_btree_cur *cur, | |
121 | xfs_fileoff_t off, | |
122 | xfs_fsblock_t bno, | |
123 | xfs_filblks_t len, | |
124 | int *stat) /* success/failure */ | |
125 | { | |
126 | cur->bc_rec.b.br_startoff = off; | |
127 | cur->bc_rec.b.br_startblock = bno; | |
128 | cur->bc_rec.b.br_blockcount = len; | |
129 | return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); | |
130 | } | |
131 | ||
132 | /* | |
133 | * Check if the inode needs to be converted to btree format. | |
134 | */ | |
135 | static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) | |
136 | { | |
137 | return whichfork != XFS_COW_FORK && | |
138 | XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && | |
139 | XFS_IFORK_NEXTENTS(ip, whichfork) > | |
140 | XFS_IFORK_MAXEXT(ip, whichfork); | |
141 | } | |
142 | ||
143 | /* | |
144 | * Check if the inode should be converted to extent format. | |
145 | */ | |
146 | static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) | |
147 | { | |
148 | return whichfork != XFS_COW_FORK && | |
149 | XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && | |
150 | XFS_IFORK_NEXTENTS(ip, whichfork) <= | |
151 | XFS_IFORK_MAXEXT(ip, whichfork); | |
152 | } | |
153 | ||
154 | /* | |
155 | * Update the record referred to by cur to the value given | |
156 | * by [off, bno, len, state]. | |
157 | * This either works (return 0) or gets an EFSCORRUPTED error. | |
158 | */ | |
159 | STATIC int | |
160 | xfs_bmbt_update( | |
161 | struct xfs_btree_cur *cur, | |
162 | xfs_fileoff_t off, | |
163 | xfs_fsblock_t bno, | |
164 | xfs_filblks_t len, | |
165 | xfs_exntst_t state) | |
166 | { | |
167 | union xfs_btree_rec rec; | |
168 | ||
169 | xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state); | |
170 | return xfs_btree_update(cur, &rec); | |
171 | } | |
172 | ||
173 | /* | |
174 | * Compute the worst-case number of indirect blocks that will be used | |
175 | * for ip's delayed extent of length "len". | |
176 | */ | |
177 | STATIC xfs_filblks_t | |
178 | xfs_bmap_worst_indlen( | |
179 | xfs_inode_t *ip, /* incore inode pointer */ | |
180 | xfs_filblks_t len) /* delayed extent length */ | |
181 | { | |
182 | int level; /* btree level number */ | |
183 | int maxrecs; /* maximum record count at this level */ | |
184 | xfs_mount_t *mp; /* mount structure */ | |
185 | xfs_filblks_t rval; /* return value */ | |
186 | xfs_filblks_t orig_len; | |
187 | ||
188 | mp = ip->i_mount; | |
189 | ||
190 | /* Calculate the worst-case size of the bmbt. */ | |
191 | orig_len = len; | |
192 | maxrecs = mp->m_bmap_dmxr[0]; | |
193 | for (level = 0, rval = 0; | |
194 | level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); | |
195 | level++) { | |
196 | len += maxrecs - 1; | |
197 | do_div(len, maxrecs); | |
198 | rval += len; | |
199 | if (len == 1) { | |
200 | rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - | |
201 | level - 1; | |
202 | break; | |
203 | } | |
204 | if (level == 0) | |
205 | maxrecs = mp->m_bmap_dmxr[1]; | |
206 | } | |
207 | ||
208 | /* Calculate the worst-case size of the rmapbt. */ | |
209 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) | |
210 | rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) + | |
211 | mp->m_rmap_maxlevels; | |
212 | ||
213 | return rval; | |
214 | } | |
215 | ||
216 | /* | |
217 | * Calculate the default attribute fork offset for newly created inodes. | |
218 | */ | |
219 | uint | |
220 | xfs_default_attroffset( | |
221 | struct xfs_inode *ip) | |
222 | { | |
223 | struct xfs_mount *mp = ip->i_mount; | |
224 | uint offset; | |
225 | ||
226 | if (mp->m_sb.sb_inodesize == 256) { | |
227 | offset = XFS_LITINO(mp, ip->i_d.di_version) - | |
228 | XFS_BMDR_SPACE_CALC(MINABTPTRS); | |
229 | } else { | |
230 | offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); | |
231 | } | |
232 | ||
233 | ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); | |
234 | return offset; | |
235 | } | |
236 | ||
237 | /* | |
238 | * Helper routine to reset inode di_forkoff field when switching | |
239 | * attribute fork from local to extent format - we reset it where | |
240 | * possible to make space available for inline data fork extents. | |
241 | */ | |
242 | STATIC void | |
243 | xfs_bmap_forkoff_reset( | |
244 | xfs_inode_t *ip, | |
245 | int whichfork) | |
246 | { | |
247 | if (whichfork == XFS_ATTR_FORK && | |
248 | ip->i_d.di_format != XFS_DINODE_FMT_DEV && | |
249 | ip->i_d.di_format != XFS_DINODE_FMT_UUID && | |
250 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { | |
251 | uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; | |
252 | ||
253 | if (dfl_forkoff > ip->i_d.di_forkoff) | |
254 | ip->i_d.di_forkoff = dfl_forkoff; | |
255 | } | |
256 | } | |
257 | ||
258 | #ifdef DEBUG | |
/*
 * Look up the buffer holding the given block address, first among the
 * buffers the cursor currently holds, then among the transaction's
 * buf log items.  Returns NULL if no cursor was given or no buffer is
 * found.  DEBUG-only helper.
 */
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	/* A NULL slot means no further levels are populated. */
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		/* Only buffer log items carry a buffer we can match on. */
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
288 | ||
/*
 * Sanity-check one interior (non-leaf) bmbt block: keys must be in
 * strictly increasing startoff order and no two child pointers may be
 * duplicates.  Panics if a duplicate pointer is found -- DEBUG-only.
 * "root" selects the incore-root pointer layout (sz is the root block
 * size); otherwise the on-disk node layout is used.
 */
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		/* Keys must be strictly increasing. */
		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		/* Compare pointer i against every later pointer j. */
		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
336 | ||
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;	/* nonzero: we read bp, we release it */

	/* Nothing to check unless the fork is in btree format. */
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			/* Not held by the cursor; read it ourselves. */
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		/* Within the block, each extent must end at or before the next. */
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
504 | ||
505 | /* | |
506 | * Add bmap trace insert entries for all the contents of the extent records. | |
507 | */ | |
508 | void | |
509 | xfs_bmap_trace_exlist( | |
510 | xfs_inode_t *ip, /* incore inode pointer */ | |
511 | xfs_extnum_t cnt, /* count of entries in the list */ | |
512 | int whichfork, /* data or attr or cow fork */ | |
513 | unsigned long caller_ip) | |
514 | { | |
515 | xfs_extnum_t idx; /* extent record index */ | |
516 | xfs_ifork_t *ifp; /* inode fork pointer */ | |
517 | int state = 0; | |
518 | ||
519 | if (whichfork == XFS_ATTR_FORK) | |
520 | state |= BMAP_ATTRFORK; | |
521 | else if (whichfork == XFS_COW_FORK) | |
522 | state |= BMAP_COWFORK; | |
523 | ||
524 | ifp = XFS_IFORK_PTR(ip, whichfork); | |
525 | ASSERT(cnt == xfs_iext_count(ifp)); | |
526 | for (idx = 0; idx < cnt; idx++) | |
527 | trace_xfs_extlist(ip, idx, state, caller_ip); | |
528 | } | |
529 | ||
530 | /* | |
531 | * Validate that the bmbt_irecs being returned from bmapi are valid | |
532 | * given the caller's original parameters. Specifically check the | |
533 | * ranges of the returned irecs to ensure that they only extend beyond | |
534 | * the given parameters if the XFS_BMAPI_ENTIRE flag was set. | |
535 | */ | |
536 | STATIC void | |
537 | xfs_bmap_validate_ret( | |
538 | xfs_fileoff_t bno, | |
539 | xfs_filblks_t len, | |
540 | int flags, | |
541 | xfs_bmbt_irec_t *mval, | |
542 | int nmap, | |
543 | int ret_nmap) | |
544 | { | |
545 | int i; /* index to map values */ | |
546 | ||
547 | ASSERT(ret_nmap <= nmap); | |
548 | ||
549 | for (i = 0; i < ret_nmap; i++) { | |
550 | ASSERT(mval[i].br_blockcount > 0); | |
551 | if (!(flags & XFS_BMAPI_ENTIRE)) { | |
552 | ASSERT(mval[i].br_startoff >= bno); | |
553 | ASSERT(mval[i].br_blockcount <= len); | |
554 | ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= | |
555 | bno + len); | |
556 | } else { | |
557 | ASSERT(mval[i].br_startoff < bno + len); | |
558 | ASSERT(mval[i].br_startoff + mval[i].br_blockcount > | |
559 | bno); | |
560 | } | |
561 | ASSERT(i == 0 || | |
562 | mval[i - 1].br_startoff + mval[i - 1].br_blockcount == | |
563 | mval[i].br_startoff); | |
564 | ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && | |
565 | mval[i].br_startblock != HOLESTARTBLOCK); | |
566 | ASSERT(mval[i].br_state == XFS_EXT_NORM || | |
567 | mval[i].br_state == XFS_EXT_UNWRITTEN); | |
568 | } | |
569 | } | |
570 | ||
#else
/* Non-DEBUG builds: compile the debug-only checkers away to no-ops. */
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
575 | ||
576 | /* | |
577 | * bmap free list manipulation functions | |
578 | */ | |
579 | ||
580 | /* | |
581 | * Add the extent to the list of extents to be free at transaction end. | |
582 | * The list is maintained sorted (by block number). | |
583 | */ | |
584 | void | |
585 | xfs_bmap_add_free( | |
586 | struct xfs_mount *mp, | |
587 | struct xfs_defer_ops *dfops, | |
588 | xfs_fsblock_t bno, | |
589 | xfs_filblks_t len, | |
590 | struct xfs_owner_info *oinfo) | |
591 | { | |
592 | struct xfs_extent_free_item *new; /* new element */ | |
593 | #ifdef DEBUG | |
594 | xfs_agnumber_t agno; | |
595 | xfs_agblock_t agbno; | |
596 | ||
597 | ASSERT(bno != NULLFSBLOCK); | |
598 | ASSERT(len > 0); | |
599 | ASSERT(len <= MAXEXTLEN); | |
600 | ASSERT(!isnullstartblock(bno)); | |
601 | agno = XFS_FSB_TO_AGNO(mp, bno); | |
602 | agbno = XFS_FSB_TO_AGBNO(mp, bno); | |
603 | ASSERT(agno < mp->m_sb.sb_agcount); | |
604 | ASSERT(agbno < mp->m_sb.sb_agblocks); | |
605 | ASSERT(len < mp->m_sb.sb_agblocks); | |
606 | ASSERT(agbno + len <= mp->m_sb.sb_agblocks); | |
607 | #endif | |
608 | ASSERT(xfs_bmap_free_item_zone != NULL); | |
609 | ||
610 | new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); | |
611 | new->xefi_startblock = bno; | |
612 | new->xefi_blockcount = (xfs_extlen_t)len; | |
613 | if (oinfo) | |
614 | new->xefi_oinfo = *oinfo; | |
615 | else | |
616 | xfs_rmap_skip_owner_update(&new->xefi_oinfo); | |
617 | trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0, | |
618 | XFS_FSB_TO_AGBNO(mp, bno), len); | |
619 | xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list); | |
620 | } | |
621 | ||
622 | /* | |
623 | * Inode fork format manipulation functions | |
624 | */ | |
625 | ||
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	/* Conversion only applies to a single-level tree with one child. */
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	/* Read the lone child block so we can validate and invalidate it. */
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	/* Free the child block with bmbt ownership, and account for it. */
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	/* Don't leave a stale buffer pointer in the cursor. */
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	/* Give back the incore root and flip the fork to extent format. */
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
688 | ||
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	/*
	 * Pick the allocation target: near the inode if nothing has been
	 * allocated in this transaction yet, otherwise relative to the
	 * first block already allocated.
	 */
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		/* Undo the incore root allocation before bailing out. */
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	/*
	 * Copy the incore extent records into the child block, skipping
	 * records with a null startblock (unwritten-to-disk delayed
	 * allocations stay incore only).
	 */
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
830 | ||
831 | /* | |
832 | * Convert a local file to an extents file. | |
833 | * This code is out of bounds for data forks of regular files, | |
834 | * since the file data needs to get logged so things will stay consistent. | |
835 | * (The bmap-level manipulations are ok, though). | |
836 | */ | |
837 | void | |
838 | xfs_bmap_local_to_extents_empty( | |
839 | struct xfs_inode *ip, | |
840 | int whichfork) | |
841 | { | |
842 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); | |
843 | ||
844 | ASSERT(whichfork != XFS_COW_FORK); | |
845 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); | |
846 | ASSERT(ifp->if_bytes == 0); | |
847 | ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); | |
848 | ||
849 | xfs_bmap_forkoff_reset(ip, whichfork); | |
850 | ifp->if_flags &= ~XFS_IFINLINE; | |
851 | ifp->if_flags |= XFS_IFEXTENTS; | |
852 | XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); | |
853 | } | |
854 | ||
855 | ||
/*
 * Convert an inline (local format) fork to extents format: allocate one
 * block, let @init_fn copy the inline data into it and log the buffer,
 * then install a single extent record pointing at the new block.
 *
 * On success *logflagsp holds the inode log flags the caller must log.
 * Not supported for the data fork of regular files, since the file data
 * itself would need to be logged.
 */
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		/* Empty fork: no block needed, just flip the fork format. */
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
		/* Nothing allocated yet this transaction: aim near the inode. */
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		/* Stay close to the transaction's first allocation. */
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	/* Install the single extent record covering the new block. */
	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	/* One block now charged against the inode's quota. */
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
953 | ||
954 | /* | |
955 | * Called from xfs_bmap_add_attrfork to handle btree format files. | |
956 | */ | |
957 | STATIC int /* error */ | |
958 | xfs_bmap_add_attrfork_btree( | |
959 | xfs_trans_t *tp, /* transaction pointer */ | |
960 | xfs_inode_t *ip, /* incore inode pointer */ | |
961 | xfs_fsblock_t *firstblock, /* first block allocated */ | |
962 | struct xfs_defer_ops *dfops, /* blocks to free at commit */ | |
963 | int *flags) /* inode logging flags */ | |
964 | { | |
965 | xfs_btree_cur_t *cur; /* btree cursor */ | |
966 | int error; /* error return value */ | |
967 | xfs_mount_t *mp; /* file system mount struct */ | |
968 | int stat; /* newroot status */ | |
969 | ||
970 | mp = ip->i_mount; | |
971 | if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) | |
972 | *flags |= XFS_ILOG_DBROOT; | |
973 | else { | |
974 | cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); | |
975 | cur->bc_private.b.dfops = dfops; | |
976 | cur->bc_private.b.firstblock = *firstblock; | |
977 | if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) | |
978 | goto error0; | |
979 | /* must be at least one entry */ | |
980 | XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0); | |
981 | if ((error = xfs_btree_new_iroot(cur, flags, &stat))) | |
982 | goto error0; | |
983 | if (stat == 0) { | |
984 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | |
985 | return -ENOSPC; | |
986 | } | |
987 | *firstblock = cur->bc_private.b.firstblock; | |
988 | cur->bc_private.b.allocated = 0; | |
989 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | |
990 | } | |
991 | return 0; | |
992 | error0: | |
993 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | |
994 | return error; | |
995 | } | |
996 | ||
997 | /* | |
998 | * Called from xfs_bmap_add_attrfork to handle extents format files. | |
999 | */ | |
1000 | STATIC int /* error */ | |
1001 | xfs_bmap_add_attrfork_extents( | |
1002 | xfs_trans_t *tp, /* transaction pointer */ | |
1003 | xfs_inode_t *ip, /* incore inode pointer */ | |
1004 | xfs_fsblock_t *firstblock, /* first block allocated */ | |
1005 | struct xfs_defer_ops *dfops, /* blocks to free at commit */ | |
1006 | int *flags) /* inode logging flags */ | |
1007 | { | |
1008 | xfs_btree_cur_t *cur; /* bmap btree cursor */ | |
1009 | int error; /* error return value */ | |
1010 | ||
1011 | if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) | |
1012 | return 0; | |
1013 | cur = NULL; | |
1014 | error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0, | |
1015 | flags, XFS_DATA_FORK); | |
1016 | if (cur) { | |
1017 | cur->bc_private.b.allocated = 0; | |
1018 | xfs_btree_del_cursor(cur, | |
1019 | error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); | |
1020 | } | |
1021 | return error; | |
1022 | } | |
1023 | ||
1024 | /* | |
1025 | * Called from xfs_bmap_add_attrfork to handle local format files. Each | |
1026 | * different data fork content type needs a different callout to do the | |
1027 | * conversion. Some are basic and only require special block initialisation | |
1028 | * callouts for the data formating, others (directories) are so specialised they | |
1029 | * handle everything themselves. | |
1030 | * | |
1031 | * XXX (dgc): investigate whether directory conversion can use the generic | |
1032 | * formatting callout. It should be possible - it's just a very complex | |
1033 | * formatter. | |
1034 | */ | |
1035 | STATIC int /* error */ | |
1036 | xfs_bmap_add_attrfork_local( | |
1037 | xfs_trans_t *tp, /* transaction pointer */ | |
1038 | xfs_inode_t *ip, /* incore inode pointer */ | |
1039 | xfs_fsblock_t *firstblock, /* first block allocated */ | |
1040 | struct xfs_defer_ops *dfops, /* blocks to free at commit */ | |
1041 | int *flags) /* inode logging flags */ | |
1042 | { | |
1043 | xfs_da_args_t dargs; /* args for dir/attr code */ | |
1044 | ||
1045 | if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) | |
1046 | return 0; | |
1047 | ||
1048 | if (S_ISDIR(VFS_I(ip)->i_mode)) { | |
1049 | memset(&dargs, 0, sizeof(dargs)); | |
1050 | dargs.geo = ip->i_mount->m_dir_geo; | |
1051 | dargs.dp = ip; | |
1052 | dargs.firstblock = firstblock; | |
1053 | dargs.dfops = dfops; | |
1054 | dargs.total = dargs.geo->fsbcount; | |
1055 | dargs.whichfork = XFS_DATA_FORK; | |
1056 | dargs.trans = tp; | |
1057 | return xfs_dir2_sf_to_block(&dargs); | |
1058 | } | |
1059 | ||
1060 | if (S_ISLNK(VFS_I(ip)->i_mode)) | |
1061 | return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, | |
1062 | flags, XFS_DATA_FORK, | |
1063 | xfs_symlink_local_to_remote); | |
1064 | ||
1065 | /* should only be called for types that support local format data */ | |
1066 | ASSERT(0); | |
1067 | return -EFSCORRUPTED; | |
1068 | } | |
1069 | ||
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 *
 * @size is the space the new attribute is expected to need; @rsvd allows
 * the transaction to dip into the reserved block pool.  On success the
 * inode has an empty extents format attribute fork and the superblock
 * attr feature bits have been set if they were not already.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	/* Reserve transaction space and, below, quota blocks up front. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	/* Recheck under the lock: the fork may have appeared meanwhile. */
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		/* No attr fork, yet attr extents recorded: on-disk corruption. */
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* Choose where the attr fork starts, based on the data fork format. */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			/* Non-default offset requires the attr2 feature. */
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	/* Shrinking the data area may force a data fork format conversion. */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	/* Turn on the superblock attr/attr2 feature bits if needed. */
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
1204 | ||
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 *
 * Walks down the leftmost spine of the bmap btree, then iterates the
 * leaf level left to right, copying every record into the incore extent
 * list.  Returns -EFSCORRUPTED if the on-disk tree is inconsistent.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		/* A child pointer outside the filesystem means corruption. */
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes. Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			/* More records on disk than the fork was sized for. */
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
			if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW, mp);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	/* The leaf walk must yield exactly the recorded extent count. */
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
		return -EFSCORRUPTED;
	ASSERT(i == xfs_iext_count(ifp));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
1328 | ||
1329 | /* | |
1330 | * Returns the file-relative block number of the first unused block(s) | |
1331 | * in the file with at least "len" logically contiguous blocks free. | |
1332 | * This is the lowest-address hole if the file has holes, else the first block | |
1333 | * past the end of file. | |
1334 | * Return 0 if the file is currently local (in-inode). | |
1335 | */ | |
1336 | int /* error */ | |
1337 | xfs_bmap_first_unused( | |
1338 | xfs_trans_t *tp, /* transaction pointer */ | |
1339 | xfs_inode_t *ip, /* incore inode */ | |
1340 | xfs_extlen_t len, /* size of hole to find */ | |
1341 | xfs_fileoff_t *first_unused, /* unused block */ | |
1342 | int whichfork) /* data or attr fork */ | |
1343 | { | |
1344 | int error; /* error return value */ | |
1345 | int idx; /* extent record index */ | |
1346 | xfs_ifork_t *ifp; /* inode fork pointer */ | |
1347 | xfs_fileoff_t lastaddr; /* last block number seen */ | |
1348 | xfs_fileoff_t lowest; /* lowest useful block */ | |
1349 | xfs_fileoff_t max; /* starting useful block */ | |
1350 | xfs_fileoff_t off; /* offset for this block */ | |
1351 | xfs_extnum_t nextents; /* number of extent entries */ | |
1352 | ||
1353 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || | |
1354 | XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || | |
1355 | XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); | |
1356 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { | |
1357 | *first_unused = 0; | |
1358 | return 0; | |
1359 | } | |
1360 | ifp = XFS_IFORK_PTR(ip, whichfork); | |
1361 | if (!(ifp->if_flags & XFS_IFEXTENTS) && | |
1362 | (error = xfs_iread_extents(tp, ip, whichfork))) | |
1363 | return error; | |
1364 | lowest = *first_unused; | |
1365 | nextents = xfs_iext_count(ifp); | |
1366 | for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { | |
1367 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); | |
1368 | off = xfs_bmbt_get_startoff(ep); | |
1369 | /* | |
1370 | * See if the hole before this extent will work. | |
1371 | */ | |
1372 | if (off >= lowest + len && off - max >= len) { | |
1373 | *first_unused = max; | |
1374 | return 0; | |
1375 | } | |
1376 | lastaddr = off + xfs_bmbt_get_blockcount(ep); | |
1377 | max = XFS_FILEOFF_MAX(lastaddr, lowest); | |
1378 | } | |
1379 | *first_unused = max; | |
1380 | return 0; | |
1381 | } | |
1382 | ||
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* in: limit; out: result */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;		/* extent record from lookup */
	xfs_extnum_t		idx;		/* extent index from lookup */
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * If an extent covers block *last_block - 1, then *last_block is
	 * already inside mapped space and is returned unchanged.
	 */
	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	/*
	 * Otherwise report the end of the extent before the gap.
	 * NOTE(review): this relies on xfs_iext_lookup_extent setting idx
	 * even on a failed lookup — presumably to the insertion point;
	 * confirm against that helper's implementation.
	 */
	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	/* No extent at all before *last_block. */
	*last_block = 0;
	return 0;
}
1431 | ||
1432 | int | |
1433 | xfs_bmap_last_extent( | |
1434 | struct xfs_trans *tp, | |
1435 | struct xfs_inode *ip, | |
1436 | int whichfork, | |
1437 | struct xfs_bmbt_irec *rec, | |
1438 | int *is_empty) | |
1439 | { | |
1440 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); | |
1441 | int error; | |
1442 | int nextents; | |
1443 | ||
1444 | if (!(ifp->if_flags & XFS_IFEXTENTS)) { | |
1445 | error = xfs_iread_extents(tp, ip, whichfork); | |
1446 | if (error) | |
1447 | return error; | |
1448 | } | |
1449 | ||
1450 | nextents = xfs_iext_count(ifp); | |
1451 | if (nextents == 0) { | |
1452 | *is_empty = 1; | |
1453 | return 0; | |
1454 | } | |
1455 | ||
1456 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec); | |
1457 | *is_empty = 0; | |
1458 | return 0; | |
1459 | } | |
1460 | ||
1461 | /* | |
1462 | * Check the last inode extent to determine whether this allocation will result | |
1463 | * in blocks being allocated at the end of the file. When we allocate new data | |
1464 | * blocks at the end of the file which do not start at the previous data block, | |
1465 | * we will try to align the new blocks at stripe unit boundaries. | |
1466 | * | |
1467 | * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be | |
1468 | * at, or past the EOF. | |
1469 | */ | |
1470 | STATIC int | |
1471 | xfs_bmap_isaeof( | |
1472 | struct xfs_bmalloca *bma, | |
1473 | int whichfork) | |
1474 | { | |
1475 | struct xfs_bmbt_irec rec; | |
1476 | int is_empty; | |
1477 | int error; | |
1478 | ||
1479 | bma->aeof = 0; | |
1480 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | |
1481 | &is_empty); | |
1482 | if (error) | |
1483 | return error; | |
1484 | ||
1485 | if (is_empty) { | |
1486 | bma->aeof = 1; | |
1487 | return 0; | |
1488 | } | |
1489 | ||
1490 | /* | |
1491 | * Check if we are allocation or past the last extent, or at least into | |
1492 | * the last delayed allocated extent. | |
1493 | */ | |
1494 | bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || | |
1495 | (bma->offset >= rec.br_startoff && | |
1496 | isnullstartblock(rec.br_startblock)); | |
1497 | return 0; | |
1498 | } | |
1499 | ||
1500 | /* | |
1501 | * Returns the file-relative block number of the first block past eof in | |
1502 | * the file. This is not based on i_size, it is based on the extent records. | |
1503 | * Returns 0 for local files, as they do not have extent records. | |
1504 | */ | |
1505 | int | |
1506 | xfs_bmap_last_offset( | |
1507 | struct xfs_inode *ip, | |
1508 | xfs_fileoff_t *last_block, | |
1509 | int whichfork) | |
1510 | { | |
1511 | struct xfs_bmbt_irec rec; | |
1512 | int is_empty; | |
1513 | int error; | |
1514 | ||
1515 | *last_block = 0; | |
1516 | ||
1517 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) | |
1518 | return 0; | |
1519 | ||
1520 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && | |
1521 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) | |
1522 | return -EIO; | |
1523 | ||
1524 | error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); | |
1525 | if (error || is_empty) | |
1526 | return error; | |
1527 | ||
1528 | *last_block = rec.br_startoff + rec.br_blockcount; | |
1529 | return 0; | |
1530 | } | |
1531 | ||
1532 | /* | |
1533 | * Returns whether the selected fork of the inode has exactly one | |
1534 | * block or not. For the data fork we check this matches di_size, | |
1535 | * implying the file's range is 0..bsize-1. | |
1536 | */ | |
1537 | int /* 1=>1 block, 0=>otherwise */ | |
1538 | xfs_bmap_one_block( | |
1539 | xfs_inode_t *ip, /* incore inode */ | |
1540 | int whichfork) /* data or attr fork */ | |
1541 | { | |
1542 | xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */ | |
1543 | xfs_ifork_t *ifp; /* inode fork pointer */ | |
1544 | int rval; /* return value */ | |
1545 | xfs_bmbt_irec_t s; /* internal version of extent */ | |
1546 | ||
1547 | #ifndef DEBUG | |
1548 | if (whichfork == XFS_DATA_FORK) | |
1549 | return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; | |
1550 | #endif /* !DEBUG */ | |
1551 | if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) | |
1552 | return 0; | |
1553 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) | |
1554 | return 0; | |
1555 | ifp = XFS_IFORK_PTR(ip, whichfork); | |
1556 | ASSERT(ifp->if_flags & XFS_IFEXTENTS); | |
1557 | ep = xfs_iext_get_ext(ifp, 0); | |
1558 | xfs_bmbt_get_all(ep, &s); | |
1559 | rval = s.br_startoff == 0 && s.br_blockcount == 1; | |
1560 | if (rval && whichfork == XFS_DATA_FORK) | |
1561 | ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize); | |
1562 | return rval; | |
1563 | } | |
1564 | ||
1565 | /* | |
1566 | * Extent tree manipulation functions used during allocation. | |
1567 | */ | |
1568 | ||
1569 | /* | |
1570 | * Convert a delayed allocation to a real allocation. | |
1571 | */ | |
1572 | STATIC int /* error */ | |
1573 | xfs_bmap_add_extent_delay_real( | |
1574 | struct xfs_bmalloca *bma, | |
1575 | int whichfork) | |
1576 | { | |
1577 | struct xfs_bmbt_irec *new = &bma->got; | |
1578 | int diff; /* temp value */ | |
1579 | xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ | |
1580 | int error; /* error return value */ | |
1581 | int i; /* temp state */ | |
1582 | xfs_ifork_t *ifp; /* inode fork pointer */ | |
1583 | xfs_fileoff_t new_endoff; /* end offset of new entry */ | |
1584 | xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ | |
1585 | /* left is 0, right is 1, prev is 2 */ | |
1586 | int rval=0; /* return value (logging flags) */ | |
1587 | int state = 0;/* state bits, accessed thru macros */ | |
1588 | xfs_filblks_t da_new; /* new count del alloc blocks used */ | |
1589 | xfs_filblks_t da_old; /* old count del alloc blocks used */ | |
1590 | xfs_filblks_t temp=0; /* value for da_new calculations */ | |
1591 | xfs_filblks_t temp2=0;/* value for da_new calculations */ | |
1592 | int tmp_rval; /* partial logging flags */ | |
1593 | struct xfs_mount *mp; | |
1594 | xfs_extnum_t *nextents; | |
1595 | ||
1596 | mp = bma->ip->i_mount; | |
1597 | ifp = XFS_IFORK_PTR(bma->ip, whichfork); | |
1598 | ASSERT(whichfork != XFS_ATTR_FORK); | |
1599 | nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents : | |
1600 | &bma->ip->i_d.di_nextents); | |
1601 | ||
1602 | ASSERT(bma->idx >= 0); | |
1603 | ASSERT(bma->idx <= xfs_iext_count(ifp)); | |
1604 | ASSERT(!isnullstartblock(new->br_startblock)); | |
1605 | ASSERT(!bma->cur || | |
1606 | (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); | |
1607 | ||
1608 | XFS_STATS_INC(mp, xs_add_exlist); | |
1609 | ||
1610 | #define LEFT r[0] | |
1611 | #define RIGHT r[1] | |
1612 | #define PREV r[2] | |
1613 | ||
1614 | if (whichfork == XFS_COW_FORK) | |
1615 | state |= BMAP_COWFORK; | |
1616 | ||
1617 | /* | |
1618 | * Set up a bunch of variables to make the tests simpler. | |
1619 | */ | |
1620 | ep = xfs_iext_get_ext(ifp, bma->idx); | |
1621 | xfs_bmbt_get_all(ep, &PREV); | |
1622 | new_endoff = new->br_startoff + new->br_blockcount; | |
1623 | ASSERT(PREV.br_startoff <= new->br_startoff); | |
1624 | ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); | |
1625 | ||
1626 | da_old = startblockval(PREV.br_startblock); | |
1627 | da_new = 0; | |
1628 | ||
1629 | /* | |
1630 | * Set flags determining what part of the previous delayed allocation | |
1631 | * extent is being replaced by a real allocation. | |
1632 | */ | |
1633 | if (PREV.br_startoff == new->br_startoff) | |
1634 | state |= BMAP_LEFT_FILLING; | |
1635 | if (PREV.br_startoff + PREV.br_blockcount == new_endoff) | |
1636 | state |= BMAP_RIGHT_FILLING; | |
1637 | ||
1638 | /* | |
1639 | * Check and set flags if this segment has a left neighbor. | |
1640 | * Don't set contiguous if the combined extent would be too large. | |
1641 | */ | |
1642 | if (bma->idx > 0) { | |
1643 | state |= BMAP_LEFT_VALID; | |
1644 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT); | |
1645 | ||
1646 | if (isnullstartblock(LEFT.br_startblock)) | |
1647 | state |= BMAP_LEFT_DELAY; | |
1648 | } | |
1649 | ||
1650 | if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && | |
1651 | LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && | |
1652 | LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && | |
1653 | LEFT.br_state == new->br_state && | |
1654 | LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) | |
1655 | state |= BMAP_LEFT_CONTIG; | |
1656 | ||
1657 | /* | |
1658 | * Check and set flags if this segment has a right neighbor. | |
1659 | * Don't set contiguous if the combined extent would be too large. | |
1660 | * Also check for all-three-contiguous being too large. | |
1661 | */ | |
1662 | if (bma->idx < xfs_iext_count(ifp) - 1) { | |
1663 | state |= BMAP_RIGHT_VALID; | |
1664 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT); | |
1665 | ||
1666 | if (isnullstartblock(RIGHT.br_startblock)) | |
1667 | state |= BMAP_RIGHT_DELAY; | |
1668 | } | |
1669 | ||
1670 | if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && | |
1671 | new_endoff == RIGHT.br_startoff && | |
1672 | new->br_startblock + new->br_blockcount == RIGHT.br_startblock && | |
1673 | new->br_state == RIGHT.br_state && | |
1674 | new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && | |
1675 | ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | | |
1676 | BMAP_RIGHT_FILLING)) != | |
1677 | (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | | |
1678 | BMAP_RIGHT_FILLING) || | |
1679 | LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount | |
1680 | <= MAXEXTLEN)) | |
1681 | state |= BMAP_RIGHT_CONTIG; | |
1682 | ||
1683 | error = 0; | |
1684 | /* | |
1685 | * Switch out based on the FILLING and CONTIG state bits. | |
1686 | */ | |
1687 | switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | | |
1688 | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { | |
1689 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | | |
1690 | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
1691 | /* | |
1692 | * Filling in all of a previously delayed allocation extent. | |
1693 | * The left and right neighbors are both contiguous with new. | |
1694 | */ | |
1695 | bma->idx--; | |
1696 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1697 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), | |
1698 | LEFT.br_blockcount + PREV.br_blockcount + | |
1699 | RIGHT.br_blockcount); | |
1700 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1701 | ||
1702 | xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); | |
1703 | (*nextents)--; | |
1704 | if (bma->cur == NULL) | |
1705 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
1706 | else { | |
1707 | rval = XFS_ILOG_CORE; | |
1708 | error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, | |
1709 | RIGHT.br_startblock, | |
1710 | RIGHT.br_blockcount, &i); | |
1711 | if (error) | |
1712 | goto done; | |
1713 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1714 | error = xfs_btree_delete(bma->cur, &i); | |
1715 | if (error) | |
1716 | goto done; | |
1717 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1718 | error = xfs_btree_decrement(bma->cur, 0, &i); | |
1719 | if (error) | |
1720 | goto done; | |
1721 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1722 | error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, | |
1723 | LEFT.br_startblock, | |
1724 | LEFT.br_blockcount + | |
1725 | PREV.br_blockcount + | |
1726 | RIGHT.br_blockcount, LEFT.br_state); | |
1727 | if (error) | |
1728 | goto done; | |
1729 | } | |
1730 | break; | |
1731 | ||
1732 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | |
1733 | /* | |
1734 | * Filling in all of a previously delayed allocation extent. | |
1735 | * The left neighbor is contiguous, the right is not. | |
1736 | */ | |
1737 | bma->idx--; | |
1738 | ||
1739 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1740 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), | |
1741 | LEFT.br_blockcount + PREV.br_blockcount); | |
1742 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1743 | ||
1744 | xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); | |
1745 | if (bma->cur == NULL) | |
1746 | rval = XFS_ILOG_DEXT; | |
1747 | else { | |
1748 | rval = 0; | |
1749 | error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, | |
1750 | LEFT.br_startblock, LEFT.br_blockcount, | |
1751 | &i); | |
1752 | if (error) | |
1753 | goto done; | |
1754 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1755 | error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, | |
1756 | LEFT.br_startblock, | |
1757 | LEFT.br_blockcount + | |
1758 | PREV.br_blockcount, LEFT.br_state); | |
1759 | if (error) | |
1760 | goto done; | |
1761 | } | |
1762 | break; | |
1763 | ||
1764 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
1765 | /* | |
1766 | * Filling in all of a previously delayed allocation extent. | |
1767 | * The right neighbor is contiguous, the left is not. | |
1768 | */ | |
1769 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1770 | xfs_bmbt_set_startblock(ep, new->br_startblock); | |
1771 | xfs_bmbt_set_blockcount(ep, | |
1772 | PREV.br_blockcount + RIGHT.br_blockcount); | |
1773 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1774 | ||
1775 | xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); | |
1776 | if (bma->cur == NULL) | |
1777 | rval = XFS_ILOG_DEXT; | |
1778 | else { | |
1779 | rval = 0; | |
1780 | error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, | |
1781 | RIGHT.br_startblock, | |
1782 | RIGHT.br_blockcount, &i); | |
1783 | if (error) | |
1784 | goto done; | |
1785 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1786 | error = xfs_bmbt_update(bma->cur, PREV.br_startoff, | |
1787 | new->br_startblock, | |
1788 | PREV.br_blockcount + | |
1789 | RIGHT.br_blockcount, PREV.br_state); | |
1790 | if (error) | |
1791 | goto done; | |
1792 | } | |
1793 | break; | |
1794 | ||
1795 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: | |
1796 | /* | |
1797 | * Filling in all of a previously delayed allocation extent. | |
1798 | * Neither the left nor right neighbors are contiguous with | |
1799 | * the new one. | |
1800 | */ | |
1801 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1802 | xfs_bmbt_set_startblock(ep, new->br_startblock); | |
1803 | xfs_bmbt_set_state(ep, new->br_state); | |
1804 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1805 | ||
1806 | (*nextents)++; | |
1807 | if (bma->cur == NULL) | |
1808 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
1809 | else { | |
1810 | rval = XFS_ILOG_CORE; | |
1811 | error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, | |
1812 | new->br_startblock, new->br_blockcount, | |
1813 | &i); | |
1814 | if (error) | |
1815 | goto done; | |
1816 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
1817 | bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; | |
1818 | error = xfs_btree_insert(bma->cur, &i); | |
1819 | if (error) | |
1820 | goto done; | |
1821 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1822 | } | |
1823 | break; | |
1824 | ||
1825 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: | |
1826 | /* | |
1827 | * Filling in the first part of a previous delayed allocation. | |
1828 | * The left neighbor is contiguous. | |
1829 | */ | |
1830 | trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); | |
1831 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), | |
1832 | LEFT.br_blockcount + new->br_blockcount); | |
1833 | xfs_bmbt_set_startoff(ep, | |
1834 | PREV.br_startoff + new->br_blockcount); | |
1835 | trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); | |
1836 | ||
1837 | temp = PREV.br_blockcount - new->br_blockcount; | |
1838 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1839 | xfs_bmbt_set_blockcount(ep, temp); | |
1840 | if (bma->cur == NULL) | |
1841 | rval = XFS_ILOG_DEXT; | |
1842 | else { | |
1843 | rval = 0; | |
1844 | error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, | |
1845 | LEFT.br_startblock, LEFT.br_blockcount, | |
1846 | &i); | |
1847 | if (error) | |
1848 | goto done; | |
1849 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1850 | error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, | |
1851 | LEFT.br_startblock, | |
1852 | LEFT.br_blockcount + | |
1853 | new->br_blockcount, | |
1854 | LEFT.br_state); | |
1855 | if (error) | |
1856 | goto done; | |
1857 | } | |
1858 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), | |
1859 | startblockval(PREV.br_startblock)); | |
1860 | xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); | |
1861 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1862 | ||
1863 | bma->idx--; | |
1864 | break; | |
1865 | ||
1866 | case BMAP_LEFT_FILLING: | |
1867 | /* | |
1868 | * Filling in the first part of a previous delayed allocation. | |
1869 | * The left neighbor is not contiguous. | |
1870 | */ | |
1871 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1872 | xfs_bmbt_set_startoff(ep, new_endoff); | |
1873 | temp = PREV.br_blockcount - new->br_blockcount; | |
1874 | xfs_bmbt_set_blockcount(ep, temp); | |
1875 | xfs_iext_insert(bma->ip, bma->idx, 1, new, state); | |
1876 | (*nextents)++; | |
1877 | if (bma->cur == NULL) | |
1878 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
1879 | else { | |
1880 | rval = XFS_ILOG_CORE; | |
1881 | error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, | |
1882 | new->br_startblock, new->br_blockcount, | |
1883 | &i); | |
1884 | if (error) | |
1885 | goto done; | |
1886 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
1887 | bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; | |
1888 | error = xfs_btree_insert(bma->cur, &i); | |
1889 | if (error) | |
1890 | goto done; | |
1891 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1892 | } | |
1893 | ||
1894 | if (xfs_bmap_needs_btree(bma->ip, whichfork)) { | |
1895 | error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, | |
1896 | bma->firstblock, bma->dfops, | |
1897 | &bma->cur, 1, &tmp_rval, whichfork); | |
1898 | rval |= tmp_rval; | |
1899 | if (error) | |
1900 | goto done; | |
1901 | } | |
1902 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), | |
1903 | startblockval(PREV.br_startblock) - | |
1904 | (bma->cur ? bma->cur->bc_private.b.allocated : 0)); | |
1905 | ep = xfs_iext_get_ext(ifp, bma->idx + 1); | |
1906 | xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); | |
1907 | trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); | |
1908 | break; | |
1909 | ||
1910 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
1911 | /* | |
1912 | * Filling in the last part of a previous delayed allocation. | |
1913 | * The right neighbor is contiguous with the new allocation. | |
1914 | */ | |
1915 | temp = PREV.br_blockcount - new->br_blockcount; | |
1916 | trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); | |
1917 | xfs_bmbt_set_blockcount(ep, temp); | |
1918 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), | |
1919 | new->br_startoff, new->br_startblock, | |
1920 | new->br_blockcount + RIGHT.br_blockcount, | |
1921 | RIGHT.br_state); | |
1922 | trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); | |
1923 | if (bma->cur == NULL) | |
1924 | rval = XFS_ILOG_DEXT; | |
1925 | else { | |
1926 | rval = 0; | |
1927 | error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, | |
1928 | RIGHT.br_startblock, | |
1929 | RIGHT.br_blockcount, &i); | |
1930 | if (error) | |
1931 | goto done; | |
1932 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1933 | error = xfs_bmbt_update(bma->cur, new->br_startoff, | |
1934 | new->br_startblock, | |
1935 | new->br_blockcount + | |
1936 | RIGHT.br_blockcount, | |
1937 | RIGHT.br_state); | |
1938 | if (error) | |
1939 | goto done; | |
1940 | } | |
1941 | ||
1942 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), | |
1943 | startblockval(PREV.br_startblock)); | |
1944 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1945 | xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); | |
1946 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1947 | ||
1948 | bma->idx++; | |
1949 | break; | |
1950 | ||
1951 | case BMAP_RIGHT_FILLING: | |
1952 | /* | |
1953 | * Filling in the last part of a previous delayed allocation. | |
1954 | * The right neighbor is not contiguous. | |
1955 | */ | |
1956 | temp = PREV.br_blockcount - new->br_blockcount; | |
1957 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1958 | xfs_bmbt_set_blockcount(ep, temp); | |
1959 | xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); | |
1960 | (*nextents)++; | |
1961 | if (bma->cur == NULL) | |
1962 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
1963 | else { | |
1964 | rval = XFS_ILOG_CORE; | |
1965 | error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, | |
1966 | new->br_startblock, new->br_blockcount, | |
1967 | &i); | |
1968 | if (error) | |
1969 | goto done; | |
1970 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
1971 | bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; | |
1972 | error = xfs_btree_insert(bma->cur, &i); | |
1973 | if (error) | |
1974 | goto done; | |
1975 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
1976 | } | |
1977 | ||
1978 | if (xfs_bmap_needs_btree(bma->ip, whichfork)) { | |
1979 | error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, | |
1980 | bma->firstblock, bma->dfops, &bma->cur, 1, | |
1981 | &tmp_rval, whichfork); | |
1982 | rval |= tmp_rval; | |
1983 | if (error) | |
1984 | goto done; | |
1985 | } | |
1986 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), | |
1987 | startblockval(PREV.br_startblock) - | |
1988 | (bma->cur ? bma->cur->bc_private.b.allocated : 0)); | |
1989 | ep = xfs_iext_get_ext(ifp, bma->idx); | |
1990 | xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); | |
1991 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
1992 | ||
1993 | bma->idx++; | |
1994 | break; | |
1995 | ||
1996 | case 0: | |
1997 | /* | |
1998 | * Filling in the middle part of a previous delayed allocation. | |
1999 | * Contiguity is impossible here. | |
2000 | * This case is avoided almost all the time. | |
2001 | * | |
2002 | * We start with a delayed allocation: | |
2003 | * | |
2004 | * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ | |
2005 | * PREV @ idx | |
2006 | * | |
2007 | * and we are allocating: | |
2008 | * +rrrrrrrrrrrrrrrrr+ | |
2009 | * new | |
2010 | * | |
2011 | * and we set it up for insertion as: | |
2012 | * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ | |
2013 | * new | |
2014 | * PREV @ idx LEFT RIGHT | |
2015 | * inserted at idx + 1 | |
2016 | */ | |
2017 | temp = new->br_startoff - PREV.br_startoff; | |
2018 | temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; | |
2019 | trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); | |
2020 | xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ | |
2021 | LEFT = *new; | |
2022 | RIGHT.br_state = PREV.br_state; | |
2023 | RIGHT.br_startblock = nullstartblock( | |
2024 | (int)xfs_bmap_worst_indlen(bma->ip, temp2)); | |
2025 | RIGHT.br_startoff = new_endoff; | |
2026 | RIGHT.br_blockcount = temp2; | |
2027 | /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ | |
2028 | xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); | |
2029 | (*nextents)++; | |
2030 | if (bma->cur == NULL) | |
2031 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2032 | else { | |
2033 | rval = XFS_ILOG_CORE; | |
2034 | error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, | |
2035 | new->br_startblock, new->br_blockcount, | |
2036 | &i); | |
2037 | if (error) | |
2038 | goto done; | |
2039 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
2040 | bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; | |
2041 | error = xfs_btree_insert(bma->cur, &i); | |
2042 | if (error) | |
2043 | goto done; | |
2044 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2045 | } | |
2046 | ||
2047 | if (xfs_bmap_needs_btree(bma->ip, whichfork)) { | |
2048 | error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, | |
2049 | bma->firstblock, bma->dfops, &bma->cur, | |
2050 | 1, &tmp_rval, whichfork); | |
2051 | rval |= tmp_rval; | |
2052 | if (error) | |
2053 | goto done; | |
2054 | } | |
2055 | temp = xfs_bmap_worst_indlen(bma->ip, temp); | |
2056 | temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); | |
2057 | diff = (int)(temp + temp2 - | |
2058 | (startblockval(PREV.br_startblock) - | |
2059 | (bma->cur ? | |
2060 | bma->cur->bc_private.b.allocated : 0))); | |
2061 | if (diff > 0) { | |
2062 | error = xfs_mod_fdblocks(bma->ip->i_mount, | |
2063 | -((int64_t)diff), false); | |
2064 | ASSERT(!error); | |
2065 | if (error) | |
2066 | goto done; | |
2067 | } | |
2068 | ||
2069 | ep = xfs_iext_get_ext(ifp, bma->idx); | |
2070 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | |
2071 | trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); | |
2072 | trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); | |
2073 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), | |
2074 | nullstartblock((int)temp2)); | |
2075 | trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); | |
2076 | ||
2077 | bma->idx++; | |
2078 | da_new = temp + temp2; | |
2079 | break; | |
2080 | ||
2081 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2082 | case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2083 | case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: | |
2084 | case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | |
2085 | case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2086 | case BMAP_LEFT_CONTIG: | |
2087 | case BMAP_RIGHT_CONTIG: | |
2088 | /* | |
2089 | * These cases are all impossible. | |
2090 | */ | |
2091 | ASSERT(0); | |
2092 | } | |
2093 | ||
2094 | /* add reverse mapping */ | |
2095 | error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new); | |
2096 | if (error) | |
2097 | goto done; | |
2098 | ||
2099 | /* convert to a btree if necessary */ | |
2100 | if (xfs_bmap_needs_btree(bma->ip, whichfork)) { | |
2101 | int tmp_logflags; /* partial log flag return val */ | |
2102 | ||
2103 | ASSERT(bma->cur == NULL); | |
2104 | error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, | |
2105 | bma->firstblock, bma->dfops, &bma->cur, | |
2106 | da_old > 0, &tmp_logflags, whichfork); | |
2107 | bma->logflags |= tmp_logflags; | |
2108 | if (error) | |
2109 | goto done; | |
2110 | } | |
2111 | ||
2112 | /* adjust for changes in reserved delayed indirect blocks */ | |
2113 | if (da_old || da_new) { | |
2114 | temp = da_new; | |
2115 | if (bma->cur) | |
2116 | temp += bma->cur->bc_private.b.allocated; | |
2117 | if (temp < da_old) | |
2118 | xfs_mod_fdblocks(bma->ip->i_mount, | |
2119 | (int64_t)(da_old - temp), false); | |
2120 | } | |
2121 | ||
2122 | /* clear out the allocated field, done with it now in any case. */ | |
2123 | if (bma->cur) | |
2124 | bma->cur->bc_private.b.allocated = 0; | |
2125 | ||
2126 | xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); | |
2127 | done: | |
2128 | if (whichfork != XFS_COW_FORK) | |
2129 | bma->logflags |= rval; | |
2130 | return error; | |
2131 | #undef LEFT | |
2132 | #undef RIGHT | |
2133 | #undef PREV | |
2134 | } | |
2135 | ||
2136 | /* | |
2137 | * Convert an unwritten allocation to a real allocation or vice versa. | |
2138 | */ | |
2139 | STATIC int /* error */ | |
2140 | xfs_bmap_add_extent_unwritten_real( | |
2141 | struct xfs_trans *tp, | |
2142 | xfs_inode_t *ip, /* incore inode pointer */ | |
2143 | int whichfork, | |
2144 | xfs_extnum_t *idx, /* extent number to update/insert */ | |
2145 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | |
2146 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | |
2147 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | |
2148 | struct xfs_defer_ops *dfops, /* list of extents to be freed */ | |
2149 | int *logflagsp) /* inode logging flags */ | |
2150 | { | |
2151 | xfs_btree_cur_t *cur; /* btree cursor */ | |
2152 | xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ | |
2153 | int error; /* error return value */ | |
2154 | int i; /* temp state */ | |
2155 | xfs_ifork_t *ifp; /* inode fork pointer */ | |
2156 | xfs_fileoff_t new_endoff; /* end offset of new entry */ | |
2157 | xfs_exntst_t newext; /* new extent state */ | |
2158 | xfs_exntst_t oldext; /* old extent state */ | |
2159 | xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ | |
2160 | /* left is 0, right is 1, prev is 2 */ | |
2161 | int rval=0; /* return value (logging flags) */ | |
2162 | int state = 0;/* state bits, accessed thru macros */ | |
2163 | struct xfs_mount *mp = ip->i_mount; | |
2164 | ||
2165 | *logflagsp = 0; | |
2166 | ||
2167 | cur = *curp; | |
2168 | ifp = XFS_IFORK_PTR(ip, whichfork); | |
2169 | if (whichfork == XFS_COW_FORK) | |
2170 | state |= BMAP_COWFORK; | |
2171 | ||
2172 | ASSERT(*idx >= 0); | |
2173 | ASSERT(*idx <= xfs_iext_count(ifp)); | |
2174 | ASSERT(!isnullstartblock(new->br_startblock)); | |
2175 | ||
2176 | XFS_STATS_INC(mp, xs_add_exlist); | |
2177 | ||
2178 | #define LEFT r[0] | |
2179 | #define RIGHT r[1] | |
2180 | #define PREV r[2] | |
2181 | ||
2182 | /* | |
2183 | * Set up a bunch of variables to make the tests simpler. | |
2184 | */ | |
2185 | error = 0; | |
2186 | ep = xfs_iext_get_ext(ifp, *idx); | |
2187 | xfs_bmbt_get_all(ep, &PREV); | |
2188 | newext = new->br_state; | |
2189 | oldext = (newext == XFS_EXT_UNWRITTEN) ? | |
2190 | XFS_EXT_NORM : XFS_EXT_UNWRITTEN; | |
2191 | ASSERT(PREV.br_state == oldext); | |
2192 | new_endoff = new->br_startoff + new->br_blockcount; | |
2193 | ASSERT(PREV.br_startoff <= new->br_startoff); | |
2194 | ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); | |
2195 | ||
2196 | /* | |
2197 | * Set flags determining what part of the previous oldext allocation | |
2198 | * extent is being replaced by a newext allocation. | |
2199 | */ | |
2200 | if (PREV.br_startoff == new->br_startoff) | |
2201 | state |= BMAP_LEFT_FILLING; | |
2202 | if (PREV.br_startoff + PREV.br_blockcount == new_endoff) | |
2203 | state |= BMAP_RIGHT_FILLING; | |
2204 | ||
2205 | /* | |
2206 | * Check and set flags if this segment has a left neighbor. | |
2207 | * Don't set contiguous if the combined extent would be too large. | |
2208 | */ | |
2209 | if (*idx > 0) { | |
2210 | state |= BMAP_LEFT_VALID; | |
2211 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); | |
2212 | ||
2213 | if (isnullstartblock(LEFT.br_startblock)) | |
2214 | state |= BMAP_LEFT_DELAY; | |
2215 | } | |
2216 | ||
2217 | if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && | |
2218 | LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && | |
2219 | LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && | |
2220 | LEFT.br_state == newext && | |
2221 | LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) | |
2222 | state |= BMAP_LEFT_CONTIG; | |
2223 | ||
2224 | /* | |
2225 | * Check and set flags if this segment has a right neighbor. | |
2226 | * Don't set contiguous if the combined extent would be too large. | |
2227 | * Also check for all-three-contiguous being too large. | |
2228 | */ | |
2229 | if (*idx < xfs_iext_count(ifp) - 1) { | |
2230 | state |= BMAP_RIGHT_VALID; | |
2231 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); | |
2232 | if (isnullstartblock(RIGHT.br_startblock)) | |
2233 | state |= BMAP_RIGHT_DELAY; | |
2234 | } | |
2235 | ||
2236 | if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && | |
2237 | new_endoff == RIGHT.br_startoff && | |
2238 | new->br_startblock + new->br_blockcount == RIGHT.br_startblock && | |
2239 | newext == RIGHT.br_state && | |
2240 | new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && | |
2241 | ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | | |
2242 | BMAP_RIGHT_FILLING)) != | |
2243 | (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | | |
2244 | BMAP_RIGHT_FILLING) || | |
2245 | LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount | |
2246 | <= MAXEXTLEN)) | |
2247 | state |= BMAP_RIGHT_CONTIG; | |
2248 | ||
2249 | /* | |
2250 | * Switch out based on the FILLING and CONTIG state bits. | |
2251 | */ | |
2252 | switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | | |
2253 | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { | |
2254 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | | |
2255 | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
2256 | /* | |
2257 | * Setting all of a previous oldext extent to newext. | |
2258 | * The left and right neighbors are both contiguous with new. | |
2259 | */ | |
2260 | --*idx; | |
2261 | ||
2262 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2263 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | |
2264 | LEFT.br_blockcount + PREV.br_blockcount + | |
2265 | RIGHT.br_blockcount); | |
2266 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2267 | ||
2268 | xfs_iext_remove(ip, *idx + 1, 2, state); | |
2269 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2270 | XFS_IFORK_NEXTENTS(ip, whichfork) - 2); | |
2271 | if (cur == NULL) | |
2272 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2273 | else { | |
2274 | rval = XFS_ILOG_CORE; | |
2275 | if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, | |
2276 | RIGHT.br_startblock, | |
2277 | RIGHT.br_blockcount, &i))) | |
2278 | goto done; | |
2279 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2280 | if ((error = xfs_btree_delete(cur, &i))) | |
2281 | goto done; | |
2282 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2283 | if ((error = xfs_btree_decrement(cur, 0, &i))) | |
2284 | goto done; | |
2285 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2286 | if ((error = xfs_btree_delete(cur, &i))) | |
2287 | goto done; | |
2288 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2289 | if ((error = xfs_btree_decrement(cur, 0, &i))) | |
2290 | goto done; | |
2291 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2292 | if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, | |
2293 | LEFT.br_startblock, | |
2294 | LEFT.br_blockcount + PREV.br_blockcount + | |
2295 | RIGHT.br_blockcount, LEFT.br_state))) | |
2296 | goto done; | |
2297 | } | |
2298 | break; | |
2299 | ||
2300 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | |
2301 | /* | |
2302 | * Setting all of a previous oldext extent to newext. | |
2303 | * The left neighbor is contiguous, the right is not. | |
2304 | */ | |
2305 | --*idx; | |
2306 | ||
2307 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2308 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | |
2309 | LEFT.br_blockcount + PREV.br_blockcount); | |
2310 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2311 | ||
2312 | xfs_iext_remove(ip, *idx + 1, 1, state); | |
2313 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2314 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | |
2315 | if (cur == NULL) | |
2316 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2317 | else { | |
2318 | rval = XFS_ILOG_CORE; | |
2319 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2320 | PREV.br_startblock, PREV.br_blockcount, | |
2321 | &i))) | |
2322 | goto done; | |
2323 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2324 | if ((error = xfs_btree_delete(cur, &i))) | |
2325 | goto done; | |
2326 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2327 | if ((error = xfs_btree_decrement(cur, 0, &i))) | |
2328 | goto done; | |
2329 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2330 | if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, | |
2331 | LEFT.br_startblock, | |
2332 | LEFT.br_blockcount + PREV.br_blockcount, | |
2333 | LEFT.br_state))) | |
2334 | goto done; | |
2335 | } | |
2336 | break; | |
2337 | ||
2338 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
2339 | /* | |
2340 | * Setting all of a previous oldext extent to newext. | |
2341 | * The right neighbor is contiguous, the left is not. | |
2342 | */ | |
2343 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2344 | xfs_bmbt_set_blockcount(ep, | |
2345 | PREV.br_blockcount + RIGHT.br_blockcount); | |
2346 | xfs_bmbt_set_state(ep, newext); | |
2347 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2348 | xfs_iext_remove(ip, *idx + 1, 1, state); | |
2349 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2350 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | |
2351 | if (cur == NULL) | |
2352 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2353 | else { | |
2354 | rval = XFS_ILOG_CORE; | |
2355 | if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, | |
2356 | RIGHT.br_startblock, | |
2357 | RIGHT.br_blockcount, &i))) | |
2358 | goto done; | |
2359 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2360 | if ((error = xfs_btree_delete(cur, &i))) | |
2361 | goto done; | |
2362 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2363 | if ((error = xfs_btree_decrement(cur, 0, &i))) | |
2364 | goto done; | |
2365 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2366 | if ((error = xfs_bmbt_update(cur, new->br_startoff, | |
2367 | new->br_startblock, | |
2368 | new->br_blockcount + RIGHT.br_blockcount, | |
2369 | newext))) | |
2370 | goto done; | |
2371 | } | |
2372 | break; | |
2373 | ||
2374 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: | |
2375 | /* | |
2376 | * Setting all of a previous oldext extent to newext. | |
2377 | * Neither the left nor right neighbors are contiguous with | |
2378 | * the new one. | |
2379 | */ | |
2380 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2381 | xfs_bmbt_set_state(ep, newext); | |
2382 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2383 | ||
2384 | if (cur == NULL) | |
2385 | rval = XFS_ILOG_DEXT; | |
2386 | else { | |
2387 | rval = 0; | |
2388 | if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, | |
2389 | new->br_startblock, new->br_blockcount, | |
2390 | &i))) | |
2391 | goto done; | |
2392 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2393 | if ((error = xfs_bmbt_update(cur, new->br_startoff, | |
2394 | new->br_startblock, new->br_blockcount, | |
2395 | newext))) | |
2396 | goto done; | |
2397 | } | |
2398 | break; | |
2399 | ||
2400 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: | |
2401 | /* | |
2402 | * Setting the first part of a previous oldext extent to newext. | |
2403 | * The left neighbor is contiguous. | |
2404 | */ | |
2405 | trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); | |
2406 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), | |
2407 | LEFT.br_blockcount + new->br_blockcount); | |
2408 | xfs_bmbt_set_startoff(ep, | |
2409 | PREV.br_startoff + new->br_blockcount); | |
2410 | trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); | |
2411 | ||
2412 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2413 | xfs_bmbt_set_startblock(ep, | |
2414 | new->br_startblock + new->br_blockcount); | |
2415 | xfs_bmbt_set_blockcount(ep, | |
2416 | PREV.br_blockcount - new->br_blockcount); | |
2417 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2418 | ||
2419 | --*idx; | |
2420 | ||
2421 | if (cur == NULL) | |
2422 | rval = XFS_ILOG_DEXT; | |
2423 | else { | |
2424 | rval = 0; | |
2425 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2426 | PREV.br_startblock, PREV.br_blockcount, | |
2427 | &i))) | |
2428 | goto done; | |
2429 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2430 | if ((error = xfs_bmbt_update(cur, | |
2431 | PREV.br_startoff + new->br_blockcount, | |
2432 | PREV.br_startblock + new->br_blockcount, | |
2433 | PREV.br_blockcount - new->br_blockcount, | |
2434 | oldext))) | |
2435 | goto done; | |
2436 | if ((error = xfs_btree_decrement(cur, 0, &i))) | |
2437 | goto done; | |
2438 | error = xfs_bmbt_update(cur, LEFT.br_startoff, | |
2439 | LEFT.br_startblock, | |
2440 | LEFT.br_blockcount + new->br_blockcount, | |
2441 | LEFT.br_state); | |
2442 | if (error) | |
2443 | goto done; | |
2444 | } | |
2445 | break; | |
2446 | ||
2447 | case BMAP_LEFT_FILLING: | |
2448 | /* | |
2449 | * Setting the first part of a previous oldext extent to newext. | |
2450 | * The left neighbor is not contiguous. | |
2451 | */ | |
2452 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2453 | ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); | |
2454 | xfs_bmbt_set_startoff(ep, new_endoff); | |
2455 | xfs_bmbt_set_blockcount(ep, | |
2456 | PREV.br_blockcount - new->br_blockcount); | |
2457 | xfs_bmbt_set_startblock(ep, | |
2458 | new->br_startblock + new->br_blockcount); | |
2459 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2460 | ||
2461 | xfs_iext_insert(ip, *idx, 1, new, state); | |
2462 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2463 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); | |
2464 | if (cur == NULL) | |
2465 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2466 | else { | |
2467 | rval = XFS_ILOG_CORE; | |
2468 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2469 | PREV.br_startblock, PREV.br_blockcount, | |
2470 | &i))) | |
2471 | goto done; | |
2472 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2473 | if ((error = xfs_bmbt_update(cur, | |
2474 | PREV.br_startoff + new->br_blockcount, | |
2475 | PREV.br_startblock + new->br_blockcount, | |
2476 | PREV.br_blockcount - new->br_blockcount, | |
2477 | oldext))) | |
2478 | goto done; | |
2479 | cur->bc_rec.b = *new; | |
2480 | if ((error = xfs_btree_insert(cur, &i))) | |
2481 | goto done; | |
2482 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2483 | } | |
2484 | break; | |
2485 | ||
2486 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | |
2487 | /* | |
2488 | * Setting the last part of a previous oldext extent to newext. | |
2489 | * The right neighbor is contiguous with the new allocation. | |
2490 | */ | |
2491 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2492 | xfs_bmbt_set_blockcount(ep, | |
2493 | PREV.br_blockcount - new->br_blockcount); | |
2494 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2495 | ||
2496 | ++*idx; | |
2497 | ||
2498 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2499 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), | |
2500 | new->br_startoff, new->br_startblock, | |
2501 | new->br_blockcount + RIGHT.br_blockcount, newext); | |
2502 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2503 | ||
2504 | if (cur == NULL) | |
2505 | rval = XFS_ILOG_DEXT; | |
2506 | else { | |
2507 | rval = 0; | |
2508 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2509 | PREV.br_startblock, | |
2510 | PREV.br_blockcount, &i))) | |
2511 | goto done; | |
2512 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2513 | if ((error = xfs_bmbt_update(cur, PREV.br_startoff, | |
2514 | PREV.br_startblock, | |
2515 | PREV.br_blockcount - new->br_blockcount, | |
2516 | oldext))) | |
2517 | goto done; | |
2518 | if ((error = xfs_btree_increment(cur, 0, &i))) | |
2519 | goto done; | |
2520 | if ((error = xfs_bmbt_update(cur, new->br_startoff, | |
2521 | new->br_startblock, | |
2522 | new->br_blockcount + RIGHT.br_blockcount, | |
2523 | newext))) | |
2524 | goto done; | |
2525 | } | |
2526 | break; | |
2527 | ||
2528 | case BMAP_RIGHT_FILLING: | |
2529 | /* | |
2530 | * Setting the last part of a previous oldext extent to newext. | |
2531 | * The right neighbor is not contiguous. | |
2532 | */ | |
2533 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2534 | xfs_bmbt_set_blockcount(ep, | |
2535 | PREV.br_blockcount - new->br_blockcount); | |
2536 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2537 | ||
2538 | ++*idx; | |
2539 | xfs_iext_insert(ip, *idx, 1, new, state); | |
2540 | ||
2541 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2542 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); | |
2543 | if (cur == NULL) | |
2544 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2545 | else { | |
2546 | rval = XFS_ILOG_CORE; | |
2547 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2548 | PREV.br_startblock, PREV.br_blockcount, | |
2549 | &i))) | |
2550 | goto done; | |
2551 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2552 | if ((error = xfs_bmbt_update(cur, PREV.br_startoff, | |
2553 | PREV.br_startblock, | |
2554 | PREV.br_blockcount - new->br_blockcount, | |
2555 | oldext))) | |
2556 | goto done; | |
2557 | if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, | |
2558 | new->br_startblock, new->br_blockcount, | |
2559 | &i))) | |
2560 | goto done; | |
2561 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
2562 | cur->bc_rec.b.br_state = XFS_EXT_NORM; | |
2563 | if ((error = xfs_btree_insert(cur, &i))) | |
2564 | goto done; | |
2565 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2566 | } | |
2567 | break; | |
2568 | ||
2569 | case 0: | |
2570 | /* | |
2571 | * Setting the middle part of a previous oldext extent to | |
2572 | * newext. Contiguity is impossible here. | |
2573 | * One extent becomes three extents. | |
2574 | */ | |
2575 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
2576 | xfs_bmbt_set_blockcount(ep, | |
2577 | new->br_startoff - PREV.br_startoff); | |
2578 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
2579 | ||
2580 | r[0] = *new; | |
2581 | r[1].br_startoff = new_endoff; | |
2582 | r[1].br_blockcount = | |
2583 | PREV.br_startoff + PREV.br_blockcount - new_endoff; | |
2584 | r[1].br_startblock = new->br_startblock + new->br_blockcount; | |
2585 | r[1].br_state = oldext; | |
2586 | ||
2587 | ++*idx; | |
2588 | xfs_iext_insert(ip, *idx, 2, &r[0], state); | |
2589 | ||
2590 | XFS_IFORK_NEXT_SET(ip, whichfork, | |
2591 | XFS_IFORK_NEXTENTS(ip, whichfork) + 2); | |
2592 | if (cur == NULL) | |
2593 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | |
2594 | else { | |
2595 | rval = XFS_ILOG_CORE; | |
2596 | if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, | |
2597 | PREV.br_startblock, PREV.br_blockcount, | |
2598 | &i))) | |
2599 | goto done; | |
2600 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2601 | /* new right extent - oldext */ | |
2602 | if ((error = xfs_bmbt_update(cur, r[1].br_startoff, | |
2603 | r[1].br_startblock, r[1].br_blockcount, | |
2604 | r[1].br_state))) | |
2605 | goto done; | |
2606 | /* new left extent - oldext */ | |
2607 | cur->bc_rec.b = PREV; | |
2608 | cur->bc_rec.b.br_blockcount = | |
2609 | new->br_startoff - PREV.br_startoff; | |
2610 | if ((error = xfs_btree_insert(cur, &i))) | |
2611 | goto done; | |
2612 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2613 | /* | |
2614 | * Reset the cursor to the position of the new extent | |
2615 | * we are about to insert as we can't trust it after | |
2616 | * the previous insert. | |
2617 | */ | |
2618 | if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, | |
2619 | new->br_startblock, new->br_blockcount, | |
2620 | &i))) | |
2621 | goto done; | |
2622 | XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); | |
2623 | /* new middle extent - newext */ | |
2624 | cur->bc_rec.b.br_state = new->br_state; | |
2625 | if ((error = xfs_btree_insert(cur, &i))) | |
2626 | goto done; | |
2627 | XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); | |
2628 | } | |
2629 | break; | |
2630 | ||
2631 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2632 | case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2633 | case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: | |
2634 | case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | |
2635 | case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
2636 | case BMAP_LEFT_CONTIG: | |
2637 | case BMAP_RIGHT_CONTIG: | |
2638 | /* | |
2639 | * These cases are all impossible. | |
2640 | */ | |
2641 | ASSERT(0); | |
2642 | } | |
2643 | ||
2644 | /* update reverse mappings */ | |
2645 | error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new); | |
2646 | if (error) | |
2647 | goto done; | |
2648 | ||
2649 | /* convert to a btree if necessary */ | |
2650 | if (xfs_bmap_needs_btree(ip, whichfork)) { | |
2651 | int tmp_logflags; /* partial log flag return val */ | |
2652 | ||
2653 | ASSERT(cur == NULL); | |
2654 | error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur, | |
2655 | 0, &tmp_logflags, whichfork); | |
2656 | *logflagsp |= tmp_logflags; | |
2657 | if (error) | |
2658 | goto done; | |
2659 | } | |
2660 | ||
2661 | /* clear out the allocated field, done with it now in any case. */ | |
2662 | if (cur) { | |
2663 | cur->bc_private.b.allocated = 0; | |
2664 | *curp = cur; | |
2665 | } | |
2666 | ||
2667 | xfs_bmap_check_leaf_extents(*curp, ip, whichfork); | |
2668 | done: | |
2669 | *logflagsp |= rval; | |
2670 | return error; | |
2671 | #undef LEFT | |
2672 | #undef RIGHT | |
2673 | #undef PREV | |
2674 | } | |
2675 | ||
/*
 * Convert a hole to a delayed allocation.
 *
 * Insert the new delalloc extent at *idx, merging it with any delalloc
 * neighbor it is contiguous with, and recompute the worst-case indirect
 * (bmap btree) block reservation for the resulting extent.  Any surplus
 * reservation is returned to the in-core free block counter.  Delalloc
 * extents exist only in the incore extent list, so no transaction, btree
 * cursor or logging is involved and the function cannot fail.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,	/* data or CoW fork */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	state = 0;
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;
	/* delalloc extents encode their indlen reservation in br_startblock */
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		/*
		 * The merged extent needs at most the worst-case indlen for
		 * its combined length, and never more than the pieces held.
		 */
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		/* oldlen == newlen skips the reservation adjustment below */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		/*
		 * Merging shrank the worst-case indirect reservation;
		 * give the excess blocks back to the free space counter.
		 */
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
2825 | ||
/*
 * Convert a hole to a real allocation.
 *
 * Insert the new real extent at *idx, merging it with a real neighbor
 * on either side when offset, start block and extent state all line up.
 * The change is mirrored into the bmap btree when a cursor exists, the
 * reverse mapping is updated, and the fork is converted from extent to
 * btree format if it has outgrown the inode.  On return *logflagsp has
 * been ORed with the inode log flags the caller must log; returns 0 or
 * a negative errno.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	struct xfs_btree_cur	**curp,	/* in/out: bmap btree cursor */
	struct xfs_bmbt_irec	*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags, ORed into */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */

	ASSERT(*idx >= 0);
	ASSERT(*idx <= xfs_iext_count(ifp));
	/* a real allocation must not look like delalloc */
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (*idx < xfs_iext_count(ifp)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 * Merging requires adjacency in both file offset and disk block
	 * space, plus matching written/unwritten state.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);

		/* three records collapse into one: record count drops */
		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			/* delete the right record, then widen the left one */
			error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
					right.br_startblock, right.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
					left.br_startblock, left.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state);
			if (error)
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, *idx, 1, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i);
			if (error)
				goto done;
			/* the new extent must not already be in the tree */
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			cur->bc_rec.b.br_state = new->br_state;
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	}

	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
				0, &tmp_logflags, whichfork);
		/* log flags are passed up even when the conversion failed */
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}
3079 | ||
3080 | /* | |
3081 | * Functions used in the extent read, allocate and remove paths | |
3082 | */ | |
3083 | ||
/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 *
 * Expand the requested range [*offp, *offp + *lenp) outwards so both
 * ends are aligned to the extent size hint @extsz, then trim the result
 * so it neither overlaps the neighboring extents described by @prevp
 * and @gotp nor exceeds MAXEXTLEN.  On success the aligned range is
 * written back through *offp/*lenp; it covers the original request
 * except when it had to be pulled back under MAXEXTLEN (see the ASSERT
 * at the bottom).  Returns 0 on success, or -EINVAL when a realtime
 * allocation cannot be made to both align and cover the request.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	/* overwrites of unwritten extents must not change their geometry */
	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * MAXEXTLEN.  In that case, reduce the size by an extsz so that it pulls
	 * the length back under MAXEXTLEN.  The outer allocation loops handle
	 * short allocation just fine, so it is safe to do this.  We only want to
	 * do it when we are forced to, though, because it means more allocation
	 * operations are required.
	 */
	while (align_alen > MAXEXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= MAXEXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see MAXEXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > MAXEXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
3260 | ||
3261 | #define XFS_ALLOC_GAP_UNITS 4 | |
3262 | ||
3263 | void | |
3264 | xfs_bmap_adjacent( | |
3265 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ | |
3266 | { | |
3267 | xfs_fsblock_t adjust; /* adjustment to block numbers */ | |
3268 | xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ | |
3269 | xfs_mount_t *mp; /* mount point structure */ | |
3270 | int nullfb; /* true if ap->firstblock isn't set */ | |
3271 | int rt; /* true if inode is realtime */ | |
3272 | ||
3273 | #define ISVALID(x,y) \ | |
3274 | (rt ? \ | |
3275 | (x) < mp->m_sb.sb_rblocks : \ | |
3276 | XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ | |
3277 | XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ | |
3278 | XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) | |
3279 | ||
3280 | mp = ap->ip->i_mount; | |
3281 | nullfb = *ap->firstblock == NULLFSBLOCK; | |
3282 | rt = XFS_IS_REALTIME_INODE(ap->ip) && | |
3283 | xfs_alloc_is_userdata(ap->datatype); | |
3284 | fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); | |
3285 | /* | |
3286 | * If allocating at eof, and there's a previous real block, | |
3287 | * try to use its last block as our starting point. | |
3288 | */ | |
3289 | if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && | |
3290 | !isnullstartblock(ap->prev.br_startblock) && | |
3291 | ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, | |
3292 | ap->prev.br_startblock)) { | |
3293 | ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; | |
3294 | /* | |
3295 | * Adjust for the gap between prevp and us. | |
3296 | */ | |
3297 | adjust = ap->offset - | |
3298 | (ap->prev.br_startoff + ap->prev.br_blockcount); | |
3299 | if (adjust && | |
3300 | ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) | |
3301 | ap->blkno += adjust; | |
3302 | } | |
3303 | /* | |
3304 | * If not at eof, then compare the two neighbor blocks. | |
3305 | * Figure out whether either one gives us a good starting point, | |
3306 | * and pick the better one. | |
3307 | */ | |
3308 | else if (!ap->eof) { | |
3309 | xfs_fsblock_t gotbno; /* right side block number */ | |
3310 | xfs_fsblock_t gotdiff=0; /* right side difference */ | |
3311 | xfs_fsblock_t prevbno; /* left side block number */ | |
3312 | xfs_fsblock_t prevdiff=0; /* left side difference */ | |
3313 | ||
3314 | /* | |
3315 | * If there's a previous (left) block, select a requested | |
3316 | * start block based on it. | |
3317 | */ | |
3318 | if (ap->prev.br_startoff != NULLFILEOFF && | |
3319 | !isnullstartblock(ap->prev.br_startblock) && | |
3320 | (prevbno = ap->prev.br_startblock + | |
3321 | ap->prev.br_blockcount) && | |
3322 | ISVALID(prevbno, ap->prev.br_startblock)) { | |
3323 | /* | |
3324 | * Calculate gap to end of previous block. | |
3325 | */ | |
3326 | adjust = prevdiff = ap->offset - | |
3327 | (ap->prev.br_startoff + | |
3328 | ap->prev.br_blockcount); | |
3329 | /* | |
3330 | * Figure the startblock based on the previous block's | |
3331 | * end and the gap size. | |
3332 | * Heuristic! | |
3333 | * If the gap is large relative to the piece we're | |
3334 | * allocating, or using it gives us an invalid block | |
3335 | * number, then just use the end of the previous block. | |
3336 | */ | |
3337 | if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && | |
3338 | ISVALID(prevbno + prevdiff, | |
3339 | ap->prev.br_startblock)) | |
3340 | prevbno += adjust; | |
3341 | else | |
3342 | prevdiff += adjust; | |
3343 | /* | |
3344 | * If the firstblock forbids it, can't use it, | |
3345 | * must use default. | |
3346 | */ | |
3347 | if (!rt && !nullfb && | |
3348 | XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) | |
3349 | prevbno = NULLFSBLOCK; | |
3350 | } | |
3351 | /* | |
3352 | * No previous block or can't follow it, just default. | |
3353 | */ | |
3354 | else | |
3355 | prevbno = NULLFSBLOCK; | |
3356 | /* | |
3357 | * If there's a following (right) block, select a requested | |
3358 | * start block based on it. | |
3359 | */ | |
3360 | if (!isnullstartblock(ap->got.br_startblock)) { | |
3361 | /* | |
3362 | * Calculate gap to start of next block. | |
3363 | */ | |
3364 | adjust = gotdiff = ap->got.br_startoff - ap->offset; | |
3365 | /* | |
3366 | * Figure the startblock based on the next block's | |
3367 | * start and the gap size. | |
3368 | */ | |
3369 | gotbno = ap->got.br_startblock; | |
3370 | /* | |
3371 | * Heuristic! | |
3372 | * If the gap is large relative to the piece we're | |
3373 | * allocating, or using it gives us an invalid block | |
3374 | * number, then just use the start of the next block | |
3375 | * offset by our length. | |
3376 | */ | |
3377 | if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && | |
3378 | ISVALID(gotbno - gotdiff, gotbno)) | |
3379 | gotbno -= adjust; | |
3380 | else if (ISVALID(gotbno - ap->length, gotbno)) { | |
3381 | gotbno -= ap->length; | |
3382 | gotdiff += adjust - ap->length; | |
3383 | } else | |
3384 | gotdiff += adjust; | |
3385 | /* | |
3386 | * If the firstblock forbids it, can't use it, | |
3387 | * must use default. | |
3388 | */ | |
3389 | if (!rt && !nullfb && | |
3390 | XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) | |
3391 | gotbno = NULLFSBLOCK; | |
3392 | } | |
3393 | /* | |
3394 | * No next block, just default. | |
3395 | */ | |
3396 | else | |
3397 | gotbno = NULLFSBLOCK; | |
3398 | /* | |
3399 | * If both valid, pick the better one, else the only good | |
3400 | * one, else ap->blkno is already set (to 0 or the inode block). | |
3401 | */ | |
3402 | if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) | |
3403 | ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; | |
3404 | else if (prevbno != NULLFSBLOCK) | |
3405 | ap->blkno = prevbno; | |
3406 | else if (gotbno != NULLFSBLOCK) | |
3407 | ap->blkno = gotbno; | |
3408 | } | |
3409 | #undef ISVALID | |
3410 | } | |
3411 | ||
/*
 * Find the length of the longest free extent in AG @ag and fold it into
 * *@blen, keeping the maximum seen so far.  If the AGF for the AG has not
 * been read in yet, try to initialise it without blocking (trylock); if it
 * still is not initialised afterwards, set *@notinit so the caller knows
 * the answer may be incomplete.  Returns 0 or a negative errno.
 */
static int
xfs_bmap_longest_free_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		ag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest;
	int			error = 0;

	/* Reference is dropped on all exit paths via the out label. */
	pag = xfs_perag_get(mp, ag);
	if (!pag->pagf_init) {
		/* Trylock so we never block on a busy AGF here. */
		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
		if (error)
			goto out;

		if (!pag->pagf_init) {
			/* Couldn't read the AGF; result is best-effort. */
			*notinit = 1;
			goto out;
		}
	}

	/*
	 * Discount blocks needed for the free list and any per-AG
	 * reservation this allocation is not entitled to use.
	 */
	longest = xfs_alloc_longest_free_extent(mp, pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

out:
	xfs_perag_put(pag);
	return error;
}
3446 | ||
3447 | static void | |
3448 | xfs_bmap_select_minlen( | |
3449 | struct xfs_bmalloca *ap, | |
3450 | struct xfs_alloc_arg *args, | |
3451 | xfs_extlen_t *blen, | |
3452 | int notinit) | |
3453 | { | |
3454 | if (notinit || *blen < ap->minlen) { | |
3455 | /* | |
3456 | * Since we did a BUF_TRYLOCK above, it is possible that | |
3457 | * there is space for this request. | |
3458 | */ | |
3459 | args->minlen = ap->minlen; | |
3460 | } else if (*blen < args->maxlen) { | |
3461 | /* | |
3462 | * If the best seen length is less than the request length, | |
3463 | * use the best as the minimum. | |
3464 | */ | |
3465 | args->minlen = *blen; | |
3466 | } else { | |
3467 | /* | |
3468 | * Otherwise we've seen an extent as big as maxlen, use that | |
3469 | * as the minimum. | |
3470 | */ | |
3471 | args->minlen = args->maxlen; | |
3472 | } | |
3473 | } | |
3474 | ||
/*
 * AG selection when ap->firstblock is not yet set (any AG may be chosen):
 * walk the AGs at most once around, starting from the AG containing
 * args->fsbno, looking for one whose longest free extent can satisfy
 * args->maxlen.  *blen ends up as the largest free extent length seen;
 * xfs_bmap_select_minlen() then derives args->minlen from it.
 */
STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;

	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	/* Stop early once an AG big enough for maxlen has been seen. */
	while (*blen < args->maxlen) {
		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return 0;
}
3508 | ||
/*
 * AG selection for filestream inodes: probe the AG currently associated
 * with args->fsbno and, if its longest free extent cannot satisfy
 * args->maxlen, ask the filestream allocator for a new AG and probe that
 * one as well.  Finally point ap->blkno/args->fsbno at the chosen AG so
 * the allocation fallback looks in the right place.
 */
STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_NEAR_BNO;
	args->total = ap->total;

	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (ag == NULLAGNUMBER)
		ag = 0;

	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
	if (error)
		return error;

	if (*blen < args->maxlen) {
		/* Current AG too fragmented; let filestreams pick another. */
		error = xfs_filestream_new_ag(ap, &ag);
		if (error)
			return error;

		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
	return 0;
}
3552 | ||
/*
 * Allocate an extent for a file from the regular (non-realtime) block
 * allocator.  Builds an xfs_alloc_arg from the bmalloca state, applies
 * extent size hints and stripe alignment heuristics, then calls
 * xfs_alloc_vextent(), retrying with progressively looser constraints
 * (drop exact-bno, drop alignment, drop minlen, finally any AG) until an
 * extent is found or space is truly exhausted.  On success updates
 * ap->blkno/ap->length, the inode block count and quota; on failure
 * returns 0 with ap->blkno == NULLFSBLOCK and ap->length == 0.
 */
STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);

	mp = ap->ip->i_mount;

	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	/* CoW fork has its own extent size hint. */
	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (xfs_alloc_is_userdata(ap->datatype))
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}


	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip)) {
			/* Start in the AG the filestream is bound to. */
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			/* Default: start near the inode itself. */
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;

	/* Nudge ap->blkno towards neighbouring extents of the file. */
	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
	xfs_rmap_skip_owner_update(&args.oinfo);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
		 * enough for the request.  If one isn't found, then adjust
		 * the minimum allocation size to the largest space found.
		 */
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->dfops->dop_low) {
		/* Low on space: take the minimum, move forward only. */
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (align) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		/* Align allocations to page-sized multiples of blocks. */
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->dfops->dop_low && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > stripe_align && blen <= args.maxlen)
				nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;
	if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
		args.ip = ap->ip;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = stripe_align;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	/* Still nothing: retry with the caller's bare minimum length. */
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	/* Last resort: scan all AGs from the first, and go low-space mode. */
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->dfops->dop_low = true;
	}
	if (args.fsbno != NULLFSBLOCK) {
		/*
		 * check the allocation happened at the same or higher AG than
		 * the first block that was allocated.
		 */
		ASSERT(*ap->firstblock == NULLFSBLOCK ||
		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
		       XFS_FSB_TO_AGNO(mp, args.fsbno));

		ap->blkno = args.fsbno;
		if (*ap->firstblock == NULLFSBLOCK)
			*ap->firstblock = args.fsbno;
		ASSERT(nullfb || fb_agno <= args.agno);
		ap->length = args.len;
		/* CoW fork blocks are not counted in di_nblocks yet. */
		if (!(ap->flags & XFS_BMAPI_COWFORK))
			ap->ip->i_d.di_nblocks += args.len;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= args.len;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
					XFS_TRANS_DQ_BCOUNT,
			(long) args.len);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
3806 | ||
3807 | /* | |
3808 | * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. | |
3809 | * It figures out where to ask the underlying allocator to put the new extent. | |
3810 | */ | |
3811 | STATIC int | |
3812 | xfs_bmap_alloc( | |
3813 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ | |
3814 | { | |
3815 | if (XFS_IS_REALTIME_INODE(ap->ip) && | |
3816 | xfs_alloc_is_userdata(ap->datatype)) | |
3817 | return xfs_bmap_rtalloc(ap); | |
3818 | return xfs_bmap_btalloc(ap); | |
3819 | } | |
3820 | ||
3821 | /* Trim extent to fit a logical block range. */ | |
3822 | void | |
3823 | xfs_trim_extent( | |
3824 | struct xfs_bmbt_irec *irec, | |
3825 | xfs_fileoff_t bno, | |
3826 | xfs_filblks_t len) | |
3827 | { | |
3828 | xfs_fileoff_t distance; | |
3829 | xfs_fileoff_t end = bno + len; | |
3830 | ||
3831 | if (irec->br_startoff + irec->br_blockcount <= bno || | |
3832 | irec->br_startoff >= end) { | |
3833 | irec->br_blockcount = 0; | |
3834 | return; | |
3835 | } | |
3836 | ||
3837 | if (irec->br_startoff < bno) { | |
3838 | distance = bno - irec->br_startoff; | |
3839 | if (isnullstartblock(irec->br_startblock)) | |
3840 | irec->br_startblock = DELAYSTARTBLOCK; | |
3841 | if (irec->br_startblock != DELAYSTARTBLOCK && | |
3842 | irec->br_startblock != HOLESTARTBLOCK) | |
3843 | irec->br_startblock += distance; | |
3844 | irec->br_startoff += distance; | |
3845 | irec->br_blockcount -= distance; | |
3846 | } | |
3847 | ||
3848 | if (end < irec->br_startoff + irec->br_blockcount) { | |
3849 | distance = irec->br_startoff + irec->br_blockcount - end; | |
3850 | irec->br_blockcount -= distance; | |
3851 | } | |
3852 | } | |
3853 | ||
/*
 * Trim the returned map to the required bounds
 *
 * Copies the portion of *got that overlaps [*bno, end) into *mval,
 * clamping the start up to obno.  Delalloc mappings (null startblock)
 * are reported as DELAYSTARTBLOCK.  With XFS_BMAPI_ENTIRE, or when got
 * ends at or before obno, the whole extent record is returned untrimmed.
 * *bno may be advanced to obno as a side effect.
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	int			flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	/* Clamp the map start to the originally requested offset. */
	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
	return;
}
3899 | ||
/*
 * Update and validate the extent map to return
 *
 * Advances *bno/*len past the mapping just built in **map and decides
 * whether that mapping starts a new entry in the output array or can be
 * folded into the previous entry; *map and *n are advanced accordingly.
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   ((flags & XFS_BMAPI_IGSTATE) ||
			mval[-1].br_state == mval->br_state)) {
		/*
		 * Merge into the previous entry: both are real mappings,
		 * physically contiguous, and in the same state (or state
		 * is ignored via XFS_BMAPI_IGSTATE).
		 */
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		/* Merge two logically adjacent delalloc mappings. */
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		/*
		 * Start a new entry, unless this is the first mapping
		 * and it lies entirely before the requested range.
		 */
		mval++;
		(*n)++;
	}
	*map = mval;
}
3954 | ||
/*
 * Map file blocks to filesystem blocks without allocation.
 *
 * Walks the in-core extent list of the fork selected by @flags and fills
 * @mval with up to *@nmap mappings covering @bno..@bno+@len-1.  Holes are
 * reported with HOLESTARTBLOCK and delalloc reservations with
 * DELAYSTARTBLOCK.  Nothing is modified; on return *@nmap holds the
 * number of mappings filled in.  Caller must hold the ilock (shared or
 * exclusive).
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	xfs_extnum_t		idx;
	int			error;
	bool			eof = false;
	int			n = 0;
	int			whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	/* Reject forks that are in neither extents nor btree format. */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* No CoW fork?  Return a hole. */
	if (whichfork == XFS_COW_FORK && !ifp) {
		mval->br_startoff = bno;
		mval->br_startblock = HOLESTARTBLOCK;
		mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}

	/* Read the extent list in from disk if not already in core. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_get_extent(ifp, ++idx, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
4052 | ||
/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 *
 * Returns 0 on success or a negative errno; on any failure every
 * reservation taken so far is unwound via the out_unreserve_* labels.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	xfs_extnum_t		*lastx,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	char			rt = XFS_IS_REALTIME_INODE(ip);
	xfs_extlen_t		extsz;
	int			error;
	xfs_fileoff_t		aoff = off;

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK)
		extsz = xfs_get_cowextsz_hint(ip);
	else
		extsz = xfs_get_extsz_hint(ip);
	if (extsz) {
		struct xfs_bmbt_irec	prev;

		/* Previous extent (if any) is needed for alignment. */
		if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/* For realtime, reuse extsz as the count in rt extent units. */
	if (rt)
		extsz = alen / mp->m_sb.sb_rextsize;

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
			rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	if (rt) {
		error = xfs_mod_frextents(mp, -((int64_t)extsz));
	} else {
		error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	}

	if (error)
		goto out_unreserve_quota;

	/* Also reserve the indirect blocks (per xfs_bmap_worst_indlen). */
	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;


	ip->i_delayed_blks += alen;

	/* Record the reservation as a delalloc extent (null startblock). */
	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_blocks:
	/* Undo the free-space counter reservation on failure. */
	if (rt)
		xfs_mod_frextents(mp, extsz);
	else
		xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	return error;
}
4178 | ||
4179 | static int | |
4180 | xfs_bmapi_allocate( | |
4181 | struct xfs_bmalloca *bma) | |
4182 | { | |
4183 | struct xfs_mount *mp = bma->ip->i_mount; | |
4184 | int whichfork = xfs_bmapi_whichfork(bma->flags); | |
4185 | struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); | |
4186 | int tmp_logflags = 0; | |
4187 | int error; | |
4188 | ||
4189 | ASSERT(bma->length > 0); | |
4190 | ||
4191 | /* | |
4192 | * For the wasdelay case, we could also just allocate the stuff asked | |
4193 | * for in this bmap call but that wouldn't be as good. | |
4194 | */ | |
4195 | if (bma->wasdel) { | |
4196 | bma->length = (xfs_extlen_t)bma->got.br_blockcount; | |
4197 | bma->offset = bma->got.br_startoff; | |
4198 | if (bma->idx) { | |
4199 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), | |
4200 | &bma->prev); | |
4201 | } | |
4202 | } else { | |
4203 | bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); | |
4204 | if (!bma->eof) | |
4205 | bma->length = XFS_FILBLKS_MIN(bma->length, | |
4206 | bma->got.br_startoff - bma->offset); | |
4207 | } | |
4208 | ||
4209 | /* | |
4210 | * Set the data type being allocated. For the data fork, the first data | |
4211 | * in the file is treated differently to all other allocations. For the | |
4212 | * attribute fork, we only need to ensure the allocated range is not on | |
4213 | * the busy list. | |
4214 | */ | |
4215 | if (!(bma->flags & XFS_BMAPI_METADATA)) { | |
4216 | bma->datatype = XFS_ALLOC_NOBUSY; | |
4217 | if (whichfork == XFS_DATA_FORK) { | |
4218 | if (bma->offset == 0) | |
4219 | bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; | |
4220 | else | |
4221 | bma->datatype |= XFS_ALLOC_USERDATA; | |
4222 | } | |
4223 | if (bma->flags & XFS_BMAPI_ZERO) | |
4224 | bma->datatype |= XFS_ALLOC_USERDATA_ZERO; | |
4225 | } | |
4226 | ||
4227 | bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; | |
4228 | ||
4229 | /* | |
4230 | * Only want to do the alignment at the eof if it is userdata and | |
4231 | * allocation length is larger than a stripe unit. | |
4232 | */ | |
4233 | if (mp->m_dalign && bma->length >= mp->m_dalign && | |
4234 | !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { | |
4235 | error = xfs_bmap_isaeof(bma, whichfork); | |
4236 | if (error) | |
4237 | return error; | |
4238 | } | |
4239 | ||
4240 | error = xfs_bmap_alloc(bma); | |
4241 | if (error) | |
4242 | return error; | |
4243 | ||
4244 | if (bma->cur) | |
4245 | bma->cur->bc_private.b.firstblock = *bma->firstblock; | |
4246 | if (bma->blkno == NULLFSBLOCK) | |
4247 | return 0; | |
4248 | if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { | |
4249 | bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); | |
4250 | bma->cur->bc_private.b.firstblock = *bma->firstblock; | |
4251 | bma->cur->bc_private.b.dfops = bma->dfops; | |
4252 | } | |
4253 | /* | |
4254 | * Bump the number of extents we've allocated | |
4255 | * in this call. | |
4256 | */ | |
4257 | bma->nallocs++; | |
4258 | ||
4259 | if (bma->cur) | |
4260 | bma->cur->bc_private.b.flags = | |
4261 | bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; | |
4262 | ||
4263 | bma->got.br_startoff = bma->offset; | |
4264 | bma->got.br_startblock = bma->blkno; | |
4265 | bma->got.br_blockcount = bma->length; | |
4266 | bma->got.br_state = XFS_EXT_NORM; | |
4267 | ||
4268 | /* | |
4269 | * In the data fork, a wasdelay extent has been initialized, so | |
4270 | * shouldn't be flagged as unwritten. | |
4271 | * | |
4272 | * For the cow fork, however, we convert delalloc reservations | |
4273 | * (extents allocated for speculative preallocation) to | |
4274 | * allocated unwritten extents, and only convert the unwritten | |
4275 | * extents to real extents when we're about to write the data. | |
4276 | */ | |
4277 | if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && | |
4278 | (bma->flags & XFS_BMAPI_PREALLOC) && | |
4279 | xfs_sb_version_hasextflgbit(&mp->m_sb)) | |
4280 | bma->got.br_state = XFS_EXT_UNWRITTEN; | |
4281 | ||
4282 | if (bma->wasdel) | |
4283 | error = xfs_bmap_add_extent_delay_real(bma, whichfork); | |
4284 | else | |
4285 | error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, | |
4286 | whichfork, &bma->idx, &bma->cur, &bma->got, | |
4287 | bma->firstblock, bma->dfops, &bma->logflags); | |
4288 | ||
4289 | bma->logflags |= tmp_logflags; | |
4290 | if (error) | |
4291 | return error; | |
4292 | ||
4293 | /* | |
4294 | * Update our extent pointer, given that xfs_bmap_add_extent_delay_real | |
4295 | * or xfs_bmap_add_extent_hole_real might have merged it into one of | |
4296 | * the neighbouring ones. | |
4297 | */ | |
4298 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); | |
4299 | ||
4300 | ASSERT(bma->got.br_startoff <= bma->offset); | |
4301 | ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= | |
4302 | bma->offset + bma->length); | |
4303 | ASSERT(bma->got.br_state == XFS_EXT_NORM || | |
4304 | bma->got.br_state == XFS_EXT_UNWRITTEN); | |
4305 | return 0; | |
4306 | } | |
4307 | ||
/*
 * Convert the state of a mapped extent if the XFS_BMAPI_* flags call for it:
 *
 *  - unwritten -> real when writing data (XFS_BMAPI_PREALLOC not set);
 *  - real -> unwritten when both XFS_BMAPI_PREALLOC and XFS_BMAPI_CONVERT
 *    are set.
 *
 * Returns -EAGAIN if the converted mapping covers less than @len (the
 * conversion may have merged with neighbouring written space), telling the
 * caller to retry the remainder; otherwise 0 or a negative errno.
 */
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		/* Btree-format fork but no cursor yet: set one up now. */
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
			&tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
4389 | ||
4390 | /* | |
4391 | * Map file blocks to filesystem blocks, and allocate blocks or convert the | |
4392 | * extent state if necessary. Detailed behaviour is controlled by the flags | |
4393 | * parameter. Only allocates blocks from a single allocation group, to avoid | |
4394 | * locking problems. | |
4395 | * | |
4396 | * The returned value in "firstblock" from the first call in a transaction | |
4397 | * must be remembered and presented to subsequent calls in "firstblock". | |
4398 | * An upper bound for the number of blocks to be allocated is supplied to | |
4399 | * the first call in "total"; if no allocation group has that many free | |
4400 | * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). | |
4401 | */ | |
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_defer_ops	*dfops)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	/* Snapshot the arguments so the result can be validated on exit. */
	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	/* A NULL transaction is only allowed for pure COW fork conversions. */
	ASSERT(tp != NULL ||
	       (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
			(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	/*
	 * On the first allocation of a transaction (*firstblock still unset),
	 * leave enough free slack in the AG for a potential full-height bmbt
	 * split; later allocations are pinned to the first AG anyway.
	 */
	if (*firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	n = 0;
	end = bno + len;
	obno = bno;

	/* Find the extent at/after bno, and the one before it (if any). */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
		eof = true;
	if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.datatype = 0;
	bma.dfops = dfops;
	bma.firstblock = firstblock;

	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			if (flags & XFS_BMAPI_DELALLOC) {
				/*
				 * For the COW fork we can reasonably get a
				 * request for converting an extent that races
				 * with other threads already having converted
				 * part of it, as conversion of COW blocks to
				 * regular blocks is not protected by the
				 * IOLOCK.
				 */
				ASSERT(flags & XFS_BMAPI_COWFORK);
				if (!(flags & XFS_BMAPI_COWFORK)) {
					error = -EIO;
					goto error0;
				}

				if (eof || bno >= end)
					break;
			} else {
				need_alloc = true;
			}
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			/* NULLFSBLOCK means no space was available: give up. */
			if (bma.blkno == NULLFSBLOCK)
				break;

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK) {
				error = xfs_refcount_alloc_cow_extent(mp, dfops,
						bma.blkno, bma.length);
				if (error)
					goto error0;
			}
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now. Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
			eof = true;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	/* A btree-format fork must hold more extents than fit inline. */
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything. Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error. Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			/* firstblock must never move to an earlier AG. */
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) <=
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
4663 | ||
/*
 * Map an existing, already-allocated extent at @startblock into a hole in
 * the data fork of @ip at file offset @bno for @len blocks.  No new blocks
 * are allocated; the target range is asserted to be a hole (reflink-style
 * remapping).
 */
static int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	struct xfs_btree_cur	*cur = NULL;
	xfs_fsblock_t		firstblock = NULLFSBLOCK;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			logflags = 0, error;

	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/*
		 * NOTE(review): reads the extent list without a transaction
		 * context even though @tp is available — confirm passing NULL
		 * here is intentional.
		 */
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	/* Account the remapped blocks against the inode. */
	ip->i_d.di_nblocks += len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.firstblock = firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
			&got, &firstblock, dfops, &logflags);
	if (error)
		goto error0;

	/* Convert back to extent format if the fork now fits inline. */
	if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
		int		tmp_logflags = 0;

		error = xfs_bmap_btree_to_extents(tp, ip, cur,
			&tmp_logflags, XFS_DATA_FORK);
		logflags |= tmp_logflags;
	}

error0:
	/* Don't log extent records or the broot for a mismatched format. */
	if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
4750 | ||
4751 | /* | |
4752 | * When a delalloc extent is split (e.g., due to a hole punch), the original | |
4753 | * indlen reservation must be shared across the two new extents that are left | |
4754 | * behind. | |
4755 | * | |
4756 | * Given the original reservation and the worst case indlen for the two new | |
4757 | * extents (as calculated by xfs_bmap_worst_indlen()), split the original | |
4758 | * reservation fairly across the two new extents. If necessary, steal available | |
4759 | * blocks from a deleted extent to make up a reservation deficiency (e.g., if | |
4760 | * ores == 1). The number of stolen blocks is returned. The availability and | |
4761 | * subsequent accounting of stolen blocks is the responsibility of the caller. | |
4762 | */ | |
4763 | static xfs_filblks_t | |
4764 | xfs_bmap_split_indlen( | |
4765 | xfs_filblks_t ores, /* original res. */ | |
4766 | xfs_filblks_t *indlen1, /* ext1 worst indlen */ | |
4767 | xfs_filblks_t *indlen2, /* ext2 worst indlen */ | |
4768 | xfs_filblks_t avail) /* stealable blocks */ | |
4769 | { | |
4770 | xfs_filblks_t len1 = *indlen1; | |
4771 | xfs_filblks_t len2 = *indlen2; | |
4772 | xfs_filblks_t nres = len1 + len2; /* new total res. */ | |
4773 | xfs_filblks_t stolen = 0; | |
4774 | xfs_filblks_t resfactor; | |
4775 | ||
4776 | /* | |
4777 | * Steal as many blocks as we can to try and satisfy the worst case | |
4778 | * indlen for both new extents. | |
4779 | */ | |
4780 | if (ores < nres && avail) | |
4781 | stolen = XFS_FILBLKS_MIN(nres - ores, avail); | |
4782 | ores += stolen; | |
4783 | ||
4784 | /* nothing else to do if we've satisfied the new reservation */ | |
4785 | if (ores >= nres) | |
4786 | return stolen; | |
4787 | ||
4788 | /* | |
4789 | * We can't meet the total required reservation for the two extents. | |
4790 | * Calculate the percent of the overall shortage between both extents | |
4791 | * and apply this percentage to each of the requested indlen values. | |
4792 | * This distributes the shortage fairly and reduces the chances that one | |
4793 | * of the two extents is left with nothing when extents are repeatedly | |
4794 | * split. | |
4795 | */ | |
4796 | resfactor = (ores * 100); | |
4797 | do_div(resfactor, nres); | |
4798 | len1 *= resfactor; | |
4799 | do_div(len1, 100); | |
4800 | len2 *= resfactor; | |
4801 | do_div(len2, 100); | |
4802 | ASSERT(len1 + len2 <= ores); | |
4803 | ASSERT(len1 < *indlen1 && len2 < *indlen2); | |
4804 | ||
4805 | /* | |
4806 | * Hand out the remainder to each extent. If one of the two reservations | |
4807 | * is zero, we want to make sure that one gets a block first. The loop | |
4808 | * below starts with len1, so hand len2 a block right off the bat if it | |
4809 | * is zero. | |
4810 | */ | |
4811 | ores -= (len1 + len2); | |
4812 | ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); | |
4813 | if (ores && !len2 && *indlen2) { | |
4814 | len2++; | |
4815 | ores--; | |
4816 | } | |
4817 | while (ores) { | |
4818 | if (len1 < *indlen1) { | |
4819 | len1++; | |
4820 | ores--; | |
4821 | } | |
4822 | if (!ores) | |
4823 | break; | |
4824 | if (len2 < *indlen2) { | |
4825 | len2++; | |
4826 | ores--; | |
4827 | } | |
4828 | } | |
4829 | ||
4830 | *indlen1 = len1; | |
4831 | *indlen2 = len2; | |
4832 | ||
4833 | return stolen; | |
4834 | } | |
4835 | ||
4836 | int | |
4837 | xfs_bmap_del_extent_delay( | |
4838 | struct xfs_inode *ip, | |
4839 | int whichfork, | |
4840 | xfs_extnum_t *idx, | |
4841 | struct xfs_bmbt_irec *got, | |
4842 | struct xfs_bmbt_irec *del) | |
4843 | { | |
4844 | struct xfs_mount *mp = ip->i_mount; | |
4845 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); | |
4846 | struct xfs_bmbt_irec new; | |
4847 | int64_t da_old, da_new, da_diff = 0; | |
4848 | xfs_fileoff_t del_endoff, got_endoff; | |
4849 | xfs_filblks_t got_indlen, new_indlen, stolen; | |
4850 | int error = 0, state = 0; | |
4851 | bool isrt; | |
4852 | ||
4853 | XFS_STATS_INC(mp, xs_del_exlist); | |
4854 | ||
4855 | isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); | |
4856 | del_endoff = del->br_startoff + del->br_blockcount; | |
4857 | got_endoff = got->br_startoff + got->br_blockcount; | |
4858 | da_old = startblockval(got->br_startblock); | |
4859 | da_new = 0; | |
4860 | ||
4861 | ASSERT(*idx >= 0); | |
4862 | ASSERT(*idx <= xfs_iext_count(ifp)); | |
4863 | ASSERT(del->br_blockcount > 0); | |
4864 | ASSERT(got->br_startoff <= del->br_startoff); | |
4865 | ASSERT(got_endoff >= del_endoff); | |
4866 | ||
4867 | if (isrt) { | |
4868 | uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); | |
4869 | ||
4870 | do_div(rtexts, mp->m_sb.sb_rextsize); | |
4871 | xfs_mod_frextents(mp, rtexts); | |
4872 | } | |
4873 | ||
4874 | /* | |
4875 | * Update the inode delalloc counter now and wait to update the | |
4876 | * sb counters as we might have to borrow some blocks for the | |
4877 | * indirect block accounting. | |
4878 | */ | |
4879 | error = xfs_trans_reserve_quota_nblks(NULL, ip, | |
4880 | -((long)del->br_blockcount), 0, | |
4881 | isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); | |
4882 | if (error) | |
4883 | return error; | |
4884 | ip->i_delayed_blks -= del->br_blockcount; | |
4885 | ||
4886 | if (whichfork == XFS_COW_FORK) | |
4887 | state |= BMAP_COWFORK; | |
4888 | ||
4889 | if (got->br_startoff == del->br_startoff) | |
4890 | state |= BMAP_LEFT_CONTIG; | |
4891 | if (got_endoff == del_endoff) | |
4892 | state |= BMAP_RIGHT_CONTIG; | |
4893 | ||
4894 | switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { | |
4895 | case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
4896 | /* | |
4897 | * Matches the whole extent. Delete the entry. | |
4898 | */ | |
4899 | xfs_iext_remove(ip, *idx, 1, state); | |
4900 | --*idx; | |
4901 | break; | |
4902 | case BMAP_LEFT_CONTIG: | |
4903 | /* | |
4904 | * Deleting the first part of the extent. | |
4905 | */ | |
4906 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
4907 | got->br_startoff = del_endoff; | |
4908 | got->br_blockcount -= del->br_blockcount; | |
4909 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, | |
4910 | got->br_blockcount), da_old); | |
4911 | got->br_startblock = nullstartblock((int)da_new); | |
4912 | xfs_iext_update_extent(ifp, *idx, got); | |
4913 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
4914 | break; | |
4915 | case BMAP_RIGHT_CONTIG: | |
4916 | /* | |
4917 | * Deleting the last part of the extent. | |
4918 | */ | |
4919 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
4920 | got->br_blockcount = got->br_blockcount - del->br_blockcount; | |
4921 | da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, | |
4922 | got->br_blockcount), da_old); | |
4923 | got->br_startblock = nullstartblock((int)da_new); | |
4924 | xfs_iext_update_extent(ifp, *idx, got); | |
4925 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
4926 | break; | |
4927 | case 0: | |
4928 | /* | |
4929 | * Deleting the middle of the extent. | |
4930 | * | |
4931 | * Distribute the original indlen reservation across the two new | |
4932 | * extents. Steal blocks from the deleted extent if necessary. | |
4933 | * Stealing blocks simply fudges the fdblocks accounting below. | |
4934 | * Warn if either of the new indlen reservations is zero as this | |
4935 | * can lead to delalloc problems. | |
4936 | */ | |
4937 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
4938 | ||
4939 | got->br_blockcount = del->br_startoff - got->br_startoff; | |
4940 | got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); | |
4941 | ||
4942 | new.br_blockcount = got_endoff - del_endoff; | |
4943 | new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); | |
4944 | ||
4945 | WARN_ON_ONCE(!got_indlen || !new_indlen); | |
4946 | stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, | |
4947 | del->br_blockcount); | |
4948 | ||
4949 | got->br_startblock = nullstartblock((int)got_indlen); | |
4950 | xfs_iext_update_extent(ifp, *idx, got); | |
4951 | trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_); | |
4952 | ||
4953 | new.br_startoff = del_endoff; | |
4954 | new.br_state = got->br_state; | |
4955 | new.br_startblock = nullstartblock((int)new_indlen); | |
4956 | ||
4957 | ++*idx; | |
4958 | xfs_iext_insert(ip, *idx, 1, &new, state); | |
4959 | ||
4960 | da_new = got_indlen + new_indlen - stolen; | |
4961 | del->br_blockcount -= stolen; | |
4962 | break; | |
4963 | } | |
4964 | ||
4965 | ASSERT(da_old >= da_new); | |
4966 | da_diff = da_old - da_new; | |
4967 | if (!isrt) | |
4968 | da_diff += del->br_blockcount; | |
4969 | if (da_diff) | |
4970 | xfs_mod_fdblocks(mp, da_diff, false); | |
4971 | return error; | |
4972 | } | |
4973 | ||
4974 | void | |
4975 | xfs_bmap_del_extent_cow( | |
4976 | struct xfs_inode *ip, | |
4977 | xfs_extnum_t *idx, | |
4978 | struct xfs_bmbt_irec *got, | |
4979 | struct xfs_bmbt_irec *del) | |
4980 | { | |
4981 | struct xfs_mount *mp = ip->i_mount; | |
4982 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); | |
4983 | struct xfs_bmbt_irec new; | |
4984 | xfs_fileoff_t del_endoff, got_endoff; | |
4985 | int state = BMAP_COWFORK; | |
4986 | ||
4987 | XFS_STATS_INC(mp, xs_del_exlist); | |
4988 | ||
4989 | del_endoff = del->br_startoff + del->br_blockcount; | |
4990 | got_endoff = got->br_startoff + got->br_blockcount; | |
4991 | ||
4992 | ASSERT(*idx >= 0); | |
4993 | ASSERT(*idx <= xfs_iext_count(ifp)); | |
4994 | ASSERT(del->br_blockcount > 0); | |
4995 | ASSERT(got->br_startoff <= del->br_startoff); | |
4996 | ASSERT(got_endoff >= del_endoff); | |
4997 | ASSERT(!isnullstartblock(got->br_startblock)); | |
4998 | ||
4999 | if (got->br_startoff == del->br_startoff) | |
5000 | state |= BMAP_LEFT_CONTIG; | |
5001 | if (got_endoff == del_endoff) | |
5002 | state |= BMAP_RIGHT_CONTIG; | |
5003 | ||
5004 | switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { | |
5005 | case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | |
5006 | /* | |
5007 | * Matches the whole extent. Delete the entry. | |
5008 | */ | |
5009 | xfs_iext_remove(ip, *idx, 1, state); | |
5010 | --*idx; | |
5011 | break; | |
5012 | case BMAP_LEFT_CONTIG: | |
5013 | /* | |
5014 | * Deleting the first part of the extent. | |
5015 | */ | |
5016 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
5017 | got->br_startoff = del_endoff; | |
5018 | got->br_blockcount -= del->br_blockcount; | |
5019 | got->br_startblock = del->br_startblock + del->br_blockcount; | |
5020 | xfs_iext_update_extent(ifp, *idx, got); | |
5021 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
5022 | break; | |
5023 | case BMAP_RIGHT_CONTIG: | |
5024 | /* | |
5025 | * Deleting the last part of the extent. | |
5026 | */ | |
5027 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
5028 | got->br_blockcount -= del->br_blockcount; | |
5029 | xfs_iext_update_extent(ifp, *idx, got); | |
5030 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
5031 | break; | |
5032 | case 0: | |
5033 | /* | |
5034 | * Deleting the middle of the extent. | |
5035 | */ | |
5036 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | |
5037 | got->br_blockcount = del->br_startoff - got->br_startoff; | |
5038 | xfs_iext_update_extent(ifp, *idx, got); | |
5039 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); | |
5040 | ||
5041 | new.br_startoff = del_endoff; | |
5042 | new.br_blockcount = got_endoff - del_endoff; | |
5043 | new.br_state = got->br_state; | |
5044 | new.br_startblock = del->br_startblock + del->br_blockcount; | |
5045 | ||
5046 | ++*idx; | |
5047 | xfs_iext_insert(ip, *idx, 1, &new, state); | |
5048 | break; | |
5049 | } | |
5050 | } | |
5051 | ||
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 *
 * On entry, *idx/@del describe the extent (or portion of an extent) being
 * removed; @got is loaded from the in-core extent list at *idx and must
 * fully contain @del.  On return *logflagsp carries the inode log flags the
 * caller must apply, *idx points at the extent record that now covers (or
 * precedes) the removed range, and any real blocks to free have been queued
 * on @dfops.  When @bflags has XFS_BMAPI_REMAP set, the blocks are only
 * unmapped: no free-list entry and no quota adjustment is made.
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	int			bflags)	/* bmapi flags */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	/* Tag the trace/insert state with the fork being operated on. */
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
	ASSERT(del->br_blockcount > 0);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &got);
	/* The deleted range must lie entirely inside the found extent. */
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	/* Delalloc extents carry an indlen reservation, not real blocks. */
	delay = isnullstartblock(got.br_startblock);
	ASSERT(isnullstartblock(del->br_startblock) == delay);
	flags = 0;
	qfield = 0;
	error = 0;
	/*
	 * If deleting a real allocation, must free up the disk space.
	 */
	if (!delay) {
		flags = XFS_ILOG_CORE;
		/*
		 * Realtime allocation.  Free it and record di_nblocks update.
		 */
		if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
			xfs_fsblock_t	bno;
			xfs_filblks_t	len;

			/* rt frees must be aligned to the rt extent size */
			ASSERT(do_mod(del->br_blockcount,
				      mp->m_sb.sb_rextsize) == 0);
			ASSERT(do_mod(del->br_startblock,
				      mp->m_sb.sb_rextsize) == 0);
			bno = del->br_startblock;
			len = del->br_blockcount;
			/* convert fsblocks to rt extent units for rtfree */
			do_div(bno, mp->m_sb.sb_rextsize);
			do_div(len, mp->m_sb.sb_rextsize);
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
			do_fx = 0;
			nblks = len * mp->m_sb.sb_rextsize;
			qfield = XFS_TRANS_DQ_RTBCOUNT;
		}
		/*
		 * Ordinary allocation.  Freeing is deferred via dfops below.
		 */
		else {
			do_fx = 1;
			nblks = del->br_blockcount;
			qfield = XFS_TRANS_DQ_BCOUNT;
		}
		/*
		 * Set up del_endblock and cur for later.
		 */
		del_endblock = del->br_startblock + del->br_blockcount;
		if (cur) {
			/* Position the btree cursor on the record to modify. */
			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
					got.br_startblock, got.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		da_old = da_new = 0;
	} else {
		/* Delayed allocation: remember the old indlen reservation. */
		da_old = startblockval(got.br_startblock);
		da_new = 0;
		nblks = 0;
		do_fx = 0;
	}

	/*
	 * Set flag value to use in switch statement.
	 * Left-contig is 2, right-contig is 1.
	 *
	 * case 3: del matches got exactly; 2: del starts at got's start;
	 * 1: del ends at got's end; 0: del is strictly interior to got.
	 */
	switch (((got.br_startoff == del->br_startoff) << 1) |
		(got_endoff == del_endoff)) {
	case 3:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx, 1,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
		--*idx;
		if (delay)
			break;

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 2:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, del_endoff);
		temp = got.br_blockcount - del->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			/*
			 * Recompute the indlen reservation for the shortened
			 * extent; never grow it past the old reservation.
			 */
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		xfs_bmbt_set_startblock(ep, del_endblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 1:
		/*
		 * Deleting the last part of the extent.
		 */
		temp = got.br_blockcount - del->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			/* Same indlen trimming as the front-delete case. */
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 0:
		/*
		 * Deleting the middle of the extent: the existing record is
		 * truncated to the left piece and a new record is inserted
		 * for the right piece.
		 */
		temp = del->br_startoff - got.br_startoff;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		new.br_startoff = del_endoff;
		temp2 = got_endoff - del_endoff;
		new.br_blockcount = temp2;
		new.br_state = got.br_state;
		if (!delay) {
			new.br_startblock = del_endblock;
			flags |= XFS_ILOG_CORE;
			if (cur) {
				if ((error = xfs_bmbt_update(cur,
						got.br_startoff,
						got.br_startblock, temp,
						got.br_state)))
					goto done;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto done;
				cur->bc_rec.b = new;
				error = xfs_btree_insert(cur, &i);
				if (error && error != -ENOSPC)
					goto done;
				/*
				 * If get no-space back from btree insert,
				 * it tried a split, and we have a zero
				 * block reservation.
				 * Fix up our state and return the error.
				 */
				if (error == -ENOSPC) {
					/*
					 * Reset the cursor, don't trust
					 * it after any insert operation.
					 */
					if ((error = xfs_bmbt_lookup_eq(cur,
							got.br_startoff,
							got.br_startblock,
							temp, &i)))
						goto done;
					XFS_WANT_CORRUPTED_GOTO(mp,
								i == 1, done);
					/*
					 * Update the btree record back
					 * to the original value.
					 */
					if ((error = xfs_bmbt_update(cur,
							got.br_startoff,
							got.br_startblock,
							got.br_blockcount,
							got.br_state)))
						goto done;
					/*
					 * Reset the extent record back
					 * to the original value.
					 */
					xfs_bmbt_set_blockcount(ep,
						got.br_blockcount);
					flags = 0;
					error = -ENOSPC;
					goto done;
				}
				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			} else
				flags |= xfs_ilog_fext(whichfork);
			XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		} else {
			xfs_filblks_t	stolen;
			ASSERT(whichfork == XFS_DATA_FORK);

			/*
			 * Distribute the original indlen reservation across the
			 * two new extents.  Steal blocks from the deleted extent
			 * if necessary.  Stealing blocks simply fudges the
			 * fdblocks accounting in xfs_bunmapi().
			 */
			temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
			temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
			stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
						       del->br_blockcount);
			da_new = temp + temp2 - stolen;
			del->br_blockcount -= stolen;

			/*
			 * Set the reservation for each extent.  Warn if either
			 * is zero as this can lead to delalloc problems.
			 */
			WARN_ON_ONCE(!temp || !temp2);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			new.br_startblock = nullstartblock((int)temp2);
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_insert(ip, *idx + 1, 1, &new, state);
		++*idx;
		break;
	}

	/* remove reverse mapping */
	if (!delay) {
		error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
		if (error)
			goto done;
	}

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			/* shared blocks: drop refcount instead of freeing */
			error = xfs_refcount_decrease_extent(mp, dfops, del);
			if (error)
				goto done;
		} else
			xfs_bmap_add_free(mp, dfops, del->br_startblock,
					del->br_blockcount, NULL);
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_d.di_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	/*
	 * Account for change in delayed indirect blocks.
	 * Nothing to do for disk quota accounting here.
	 */
	ASSERT(da_old >= da_new);
	if (da_old > da_new)
		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
	*logflagsp = flags;
	return error;
}
5392 | ||
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *done is set.
 *
 * Extents are removed back-to-front: we start at the last block of the
 * range and walk toward @bno.  On return *rlen holds the length still
 * unmapped (0 when everything was removed).  Realtime files get extra
 * handling because partial rt extents cannot be freed; they are either
 * skipped or converted to unwritten instead.
 */
int						/* error */
__xfs_bunmapi(
	xfs_trans_t		*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting offset to unmap */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts,		/* number of extents max */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	struct xfs_defer_ops	*dfops)		/* i/o: deferred updates */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_irec_t		del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	xfs_bmbt_irec_t		got;		/* current extent record */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	xfs_extnum_t		lastx;		/* last extent index used */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_fileoff_t		start;		/* first file offset deleted */
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		max_len;
	xfs_agnumber_t		prev_agno = NULLAGNUMBER, agno;

	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/* only extent-list or btree format forks can be unmapped */
	if (unlikely(
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return -EFSCORRUPTED;
	}
	mp = ip->i_mount;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	/*
	 * Guesstimate how many blocks we can unmap without running the risk of
	 * blowing out the transaction with a mix of EFIs and reflink
	 * adjustments.
	 */
	if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
	else
		max_len = len;

	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	if (xfs_iext_count(ifp) == 0) {
		/* empty fork: nothing to unmap */
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	start = bno;
	/* work backwards: bno tracks the last block still to unmap */
	bno = start + len - 1;

	/*
	 * Check to see if the given block number is past the end of the
	 * file, back up to the last block if so...
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
		ASSERT(lastx > 0);
		xfs_iext_get_extent(ifp, --lastx, &got);
		bno = got.br_startoff + got.br_blockcount - 1;
	}

	logflags = 0;
	if (ifp->if_flags & XFS_IFBROOT) {
		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	extno = 0;
	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
	       (nexts == 0 || extno < nexts) && max_len > 0) {
		/*
		 * Is the found extent after a hole in which bno lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > bno) {
			if (--lastx < 0)
				break;
			xfs_iext_get_extent(ifp, lastx, &got);
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		bno = XFS_FILEOFF_MIN(bno,
			got.br_startoff + got.br_blockcount - 1);
		if (bno < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		/*
		 * Make sure we don't touch multiple AGF headers out of order
		 * in a single transaction, as that could cause AB-BA deadlocks.
		 */
		if (!wasdel) {
			agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
			if (prev_agno != NULLAGNUMBER && prev_agno > agno)
				break;
			prev_agno = agno;
		}
		/* clip the extent to the [start, bno] range being unmapped */
		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > bno + 1)
			del.br_blockcount = bno + 1 - del.br_startoff;

		/* How much can we safely unmap? */
		if (max_len < del.br_blockcount) {
			del.br_startoff += del.br_blockcount - max_len;
			if (!wasdel)
				del.br_startblock += del.br_blockcount - max_len;
			del.br_blockcount = max_len;
		}

		sum = del.br_startblock + del.br_blockcount;
		if (isrt &&
		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN ||
			    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT(bno >= mod);
				bno -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (bno < got.br_startoff) {
					if (--lastx >= 0)
						xfs_bmbt_get_all(xfs_iext_get_ext(
							ifp, lastx), &got);
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &lastx, &cur, &del,
					firstblock, dfops, &logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			mod = mp->m_sb.sb_rextsize - mod;
			if (del.br_blockcount > mod) {
				del.br_blockcount -= mod;
				del.br_startoff += mod;
				del.br_startblock += mod;
			} else if ((del.br_startoff == start &&
				    (del.br_state == XFS_EXT_UNWRITTEN ||
				     tp->t_blk_res == 0)) ||
				   !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(bno >= del.br_blockcount);
				bno -= del.br_blockcount;
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				ASSERT(lastx > 0);
				xfs_iext_get_extent(ifp, lastx - 1, &prev);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				if (prev.br_startoff < start) {
					mod = start - prev.br_startoff;
					prev.br_blockcount -= mod;
					prev.br_startblock += mod;
					prev.br_startoff = start;
				}
				prev.br_state = XFS_EXT_UNWRITTEN;
				lastx--;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &lastx, &cur,
						&prev, firstblock, dfops,
						&logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &lastx, &cur,
						&del, firstblock, dfops,
						&logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

		/*
		 * If it's the case where the directory code is running
		 * with no block reservation, and the deleted block is in
		 * the middle of its extent, and the resulting insert
		 * of an extent would cause transformation to btree format,
		 * then reject it.  The calling code will then swap
		 * blocks around instead.
		 * We have to do this now, rather than waiting for the
		 * conversion to btree format, since the transaction
		 * will be dirty.
		 */
		if (!wasdel && tp->t_blk_res == 0 &&
		    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		    XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
			XFS_IFORK_MAXEXT(ip, whichfork) &&
		    del.br_startoff > got.br_startoff &&
		    del.br_startoff + del.br_blockcount <
		    got.br_startoff + got.br_blockcount) {
			error = -ENOSPC;
			goto error0;
		}

		/*
		 * Unreserve quota and update realtime free space, if
		 * appropriate.  If delayed allocation, update the inode delalloc
		 * counter now and wait to update the sb counters as
		 * xfs_bmap_del_extent() might need to borrow some blocks.
		 */
		if (wasdel) {
			ASSERT(startblockval(del.br_startblock) > 0);
			if (isrt) {
				xfs_filblks_t rtexts;

				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
				do_div(rtexts, mp->m_sb.sb_rextsize);
				xfs_mod_frextents(mp, (int64_t)rtexts);
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_RTBLKS);
			} else {
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_REGBLKS);
			}
			ip->i_delayed_blks -= del.br_blockcount;
			if (cur)
				cur->bc_private.b.flags |=
					XFS_BTCUR_BPRV_WASDEL;
		} else if (cur)
			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;

		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
				&tmp_logflags, whichfork, flags);
		logflags |= tmp_logflags;
		if (error)
			goto error0;

		if (!isrt && wasdel)
			xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);

		max_len -= del.br_blockcount;
		bno = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
			if (lastx >= 0) {
				xfs_iext_get_extent(ifp, lastx, &got);
				if (got.br_startoff > bno && --lastx >= 0)
					xfs_iext_get_extent(ifp, lastx, &got);
			}
			extno++;
		}
	}
	/* report how much of the requested range is still mapped */
	if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
		*rlen = 0;
	else
		*rlen = bno - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
			&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from btree to extents, give it cur
	 */
	else if (xfs_bmap_wants_extents(ip, whichfork)) {
		ASSERT(cur != NULL);
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
			whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from extents to local?
	 */
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error) {
			*firstblock = cur->bc_private.b.firstblock;
			cur->bc_private.b.allocated = 0;
		}
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
5806 | ||
5807 | /* Unmap a range of a file. */ | |
5808 | int | |
5809 | xfs_bunmapi( | |
5810 | xfs_trans_t *tp, | |
5811 | struct xfs_inode *ip, | |
5812 | xfs_fileoff_t bno, | |
5813 | xfs_filblks_t len, | |
5814 | int flags, | |
5815 | xfs_extnum_t nexts, | |
5816 | xfs_fsblock_t *firstblock, | |
5817 | struct xfs_defer_ops *dfops, | |
5818 | int *done) | |
5819 | { | |
5820 | int error; | |
5821 | ||
5822 | error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock, | |
5823 | dfops); | |
5824 | *done = (len == 0); | |
5825 | return error; | |
5826 | } | |
5827 | ||
5828 | /* | |
5829 | * Determine whether an extent shift can be accomplished by a merge with the | |
5830 | * extent that precedes the target hole of the shift. | |
5831 | */ | |
5832 | STATIC bool | |
5833 | xfs_bmse_can_merge( | |
5834 | struct xfs_bmbt_irec *left, /* preceding extent */ | |
5835 | struct xfs_bmbt_irec *got, /* current extent to shift */ | |
5836 | xfs_fileoff_t shift) /* shift fsb */ | |
5837 | { | |
5838 | xfs_fileoff_t startoff; | |
5839 | ||
5840 | startoff = got->br_startoff - shift; | |
5841 | ||
5842 | /* | |
5843 | * The extent, once shifted, must be adjacent in-file and on-disk with | |
5844 | * the preceding extent. | |
5845 | */ | |
5846 | if ((left->br_startoff + left->br_blockcount != startoff) || | |
5847 | (left->br_startblock + left->br_blockcount != got->br_startblock) || | |
5848 | (left->br_state != got->br_state) || | |
5849 | (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) | |
5850 | return false; | |
5851 | ||
5852 | return true; | |
5853 | } | |
5854 | ||
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file.  If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 *
 * On success the in-core list has one fewer record (gotp absorbed into
 * leftp), the on-disk extent count is decremented, and *logflags is updated
 * with the inode fields that must be logged.  If a btree cursor is supplied,
 * the corresponding bmbt records are deleted/updated as well.
 */
STATIC int
xfs_bmse_merge(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	int				current_ext,	/* idx of gotp */
	struct xfs_bmbt_rec_host	*gotp,		/* extent to shift */
	struct xfs_bmbt_rec_host	*leftp,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		left;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	xfs_bmbt_get_all(gotp, &got);
	xfs_bmbt_get_all(leftp, &left);
	/* length of the merged extent */
	blockcount = left.br_blockcount + got.br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(&left, &got, shift));

	/*
	 * Merge the in-core extents.  Note that the host record pointers and
	 * current_ext index are invalid once the extent has been removed via
	 * xfs_iext_remove().
	 */
	xfs_bmbt_set_blockcount(leftp, blockcount);
	xfs_iext_remove(ip, current_ext, 1, 0);

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		/* extents-format fork: log the extent list directly */
		*logflags |= XFS_ILOG_DEXT;
		return 0;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
				   left.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	left.br_blockcount = blockcount;

	return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
			       left.br_blockcount, left.br_state);
}
5933 | ||
/*
 * Shift a single extent by @offset_shift_fsb blocks in @direction, or,
 * when shifting left, merge it into the left neighbour if the two become
 * contiguous after the shift.
 *
 * @ip:			inode whose data fork is being modified
 * @whichfork:		which fork the extent lives in
 * @offset_shift_fsb:	distance (in fsblocks) to shift the extent
 * @current_ext:	in/out extent index; advanced (SHIFT_LEFT) or
 *			retreated (SHIFT_RIGHT) for the next iteration
 * @gotp:		in-core record of the extent being shifted
 * @cur:		bmap btree cursor, or NULL for extent-format forks
 * @logflags:		out: inode log flags required by the modification
 * @direction:		SHIFT_LEFT or SHIFT_RIGHT
 * @dfops:		deferred-op list for the rmap updates
 *
 * Returns 0 on success, -EINVAL when there is not enough room (hole) to
 * shift into, or a negative errno from the btree/rmap updates.
 */
STATIC int
xfs_bmse_shift_one(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			offset_shift_fsb,
	int				*current_ext,
	struct xfs_bmbt_rec_host	*gotp,
	struct xfs_btree_cur		*cur,
	int				*logflags,
	enum shift_direction		direction,
	struct xfs_defer_ops		*dfops)
{
	struct xfs_ifork		*ifp;
	struct xfs_mount		*mp;
	xfs_fileoff_t			startoff;
	struct xfs_bmbt_rec_host	*adj_irecp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		adj_irec;
	int				error;
	int				i;
	int				total_extents;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	total_extents = xfs_iext_count(ifp);

	xfs_bmbt_get_all(gotp, &got);

	/* delalloc extents should be prevented by caller */
	XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

	if (direction == SHIFT_LEFT) {
		startoff = got.br_startoff - offset_shift_fsb;

		/*
		 * Check for merge if we've got an extent to the left,
		 * otherwise make sure there's enough room at the start
		 * of the file for the shift.
		 */
		if (!*current_ext) {
			if (got.br_startoff < offset_shift_fsb)
				return -EINVAL;
			goto update_current_ext;
		}
		/*
		 * grab the left extent and check for a large
		 * enough hole.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);

		if (startoff <
		    adj_irec.br_startoff + adj_irec.br_blockcount)
			return -EINVAL;

		/* check whether to merge the extent or shift it down */
		if (xfs_bmse_can_merge(&adj_irec, &got,
				       offset_shift_fsb)) {
			error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
					       *current_ext, gotp, adj_irecp,
					       cur, logflags);
			if (error)
				return error;
			/*
			 * The merged extent absorbed @got; reschedule the
			 * rmap records for the pre-merge extent below.
			 */
			adj_irec = got;
			goto update_rmap;
		}
	} else {
		startoff = got.br_startoff + offset_shift_fsb;
		/* nothing to move if this is the last extent */
		if (*current_ext >= (total_extents - 1))
			goto update_current_ext;
		/*
		 * If this is not the last extent in the file, make sure there
		 * is enough room between current extent and next extent for
		 * accommodating the shift.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);
		if (startoff + got.br_blockcount > adj_irec.br_startoff)
			return -EINVAL;
		/*
		 * Unlike a left shift (which involves a hole punch),
		 * a right shift does not modify extent neighbors
		 * in any way. We should never find mergeable extents
		 * in this scenario. Check anyways and warn if we
		 * encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}
	/*
	 * Increment the extent index for the next iteration, update the start
	 * offset of the in-core extent and update the btree if applicable.
	 */
update_current_ext:
	if (direction == SHIFT_LEFT)
		(*current_ext)++;
	else
		(*current_ext)--;
	xfs_bmbt_set_startoff(gotp, startoff);
	*logflags |= XFS_ILOG_CORE;
	/* remember the pre-shift extent so the rmap records can be moved */
	adj_irec = got;
	if (!cur) {
		/* extent-format fork: extents live in the inode core */
		*logflags |= XFS_ILOG_DEXT;
		goto update_rmap;
	}

	/* look up the on-disk record at its old offset before rewriting it */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	got.br_startoff = startoff;
	error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
				got.br_blockcount, got.br_state);
	if (error)
		return error;

update_rmap:
	/* update reverse mapping: unmap the old location, map the new one */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
	if (error)
		return error;
	adj_irec.br_startoff = startoff;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
6064 | ||
/*
 * Shift extent records to the left/right to cover/create a hole.
 *
 * The maximum number of extents to be shifted in a single operation is
 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
 * is the length by which each extent is shifted. If there is no hole to shift
 * the extents into, this will be considered invalid operation and we abort
 * immediately.
 *
 * On the first call of a right shift, *next_fsb must be NULLFSBLOCK; it is
 * initialized here from the last extent in the fork.  *done is set to 1 once
 * the stop extent has been reached.
 */
int
xfs_bmap_shift_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	int			*done,
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	enum shift_direction	direction,
	int			num_exts)
{
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_extnum_t			nexts = 0;
	xfs_extnum_t			current_ext;
	xfs_extnum_t			total_extents;
	xfs_extnum_t			stop_extent;
	int				error = 0;
	int				whichfork = XFS_DATA_FORK;
	int				logflags = 0;

	/* only extent and btree format data forks are supported */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_shift_extents",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
	ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/* btree-format fork: set up a cursor for the on-disk updates */
	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	/*
	 * There may be delalloc extents in the data fork before the range we
	 * are collapsing out, so we cannot use the count of real extents here.
	 * Instead we have to calculate it from the incore fork.
	 */
	total_extents = xfs_iext_count(ifp);
	if (total_extents == 0) {
		*done = 1;
		goto del_cursor;
	}

	/*
	 * In case of first right shift, we need to initialize next_fsb
	 */
	if (*next_fsb == NULLFSBLOCK) {
		gotp = xfs_iext_get_ext(ifp, total_extents - 1);
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
		if (stop_fsb > *next_fsb) {
			/* the whole range lies beyond the last extent */
			*done = 1;
			goto del_cursor;
		}
	}

	/* Lookup the extent index at which we have to stop */
	if (direction == SHIFT_RIGHT) {
		gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
		/* Make stop_extent exclusive of shift range */
		stop_extent--;
	} else
		stop_extent = total_extents;

	/*
	 * Look up the extent index for the fsb where we start shifting. We can
	 * henceforth iterate with current_ext as extent list changes are locked
	 * out via ilock.
	 *
	 * gotp can be null in 2 cases: 1) if there are no extents or 2)
	 * *next_fsb lies in a hole beyond which there are no extents. Either
	 * way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
	if (!gotp) {
		*done = 1;
		goto del_cursor;
	}

	/* some sanity checking before we finally start shifting extents */
	if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
	    (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
		error = -EIO;
		goto del_cursor;
	}

	while (nexts++ < num_exts) {
		error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
					   &current_ext, gotp, cur, &logflags,
					   direction, dfops);
		if (error)
			goto del_cursor;
		/*
		 * If there was an extent merge during the shift, the extent
		 * count can change. Update the total and grab the next record.
		 */
		if (direction == SHIFT_LEFT) {
			total_extents = xfs_iext_count(ifp);
			stop_extent = total_extents;
		}

		if (current_ext == stop_extent) {
			*done = 1;
			*next_fsb = NULLFSBLOCK;
			break;
		}
		gotp = xfs_iext_get_ext(ifp, current_ext);
	}

	/* report where to resume from if we stopped early */
	if (!*done) {
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
	}

del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);

	return error;
}
6225 | ||
/*
 * Split an extent into two extents at @split_fsb, so that @split_fsb becomes
 * the first block of the second (new) extent.
 *
 * If @split_fsb lies in a hole or at the first block of an extent, there is
 * nothing to do and we just return 0.
 *
 * @tp:		transaction the update is made in
 * @ip:		inode whose data fork is being split
 * @split_fsb:	file offset at which to split
 * @firstfsb:	first block allocated, for AG locking order
 * @dfops:	deferred-op list (used if a btree conversion is needed)
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb,
	xfs_fsblock_t		*firstfsb,
	struct xfs_defer_ops	*dfops)
{
	int				whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	xfs_extnum_t			current_ext;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	/* only extent and btree format data forks are supported */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * gotp can be null in 2 cases: 1) if there are no extents
	 * or 2) split_fsb lies in a hole beyond which there are
	 * no extents. Either way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
	if (!gotp)
		return 0;

	xfs_bmbt_get_all(gotp, &got);

	/*
	 * Check split_fsb lies in a hole or the start boundary offset
	 * of the extent.
	 */
	if (got.br_startoff >= split_fsb)
		return 0;

	/* carve the tail of @got off into @new, preserving contiguity */
	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstfsb;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/* shrink the in-core record of the original extent */
	xfs_bmbt_set_blockcount(gotp, gotblkcnt);
	got.br_blockcount = gotblkcnt;

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				got.br_state);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	current_ext++;
	xfs_iext_insert(ip, current_ext, 1, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
				new.br_startblock, new.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		/* the new record must not already exist in the btree */
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		cur->bc_rec.b.br_state = new.br_state;

		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
				&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
6369 | ||
6370 | int | |
6371 | xfs_bmap_split_extent( | |
6372 | struct xfs_inode *ip, | |
6373 | xfs_fileoff_t split_fsb) | |
6374 | { | |
6375 | struct xfs_mount *mp = ip->i_mount; | |
6376 | struct xfs_trans *tp; | |
6377 | struct xfs_defer_ops dfops; | |
6378 | xfs_fsblock_t firstfsb; | |
6379 | int error; | |
6380 | ||
6381 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, | |
6382 | XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); | |
6383 | if (error) | |
6384 | return error; | |
6385 | ||
6386 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
6387 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | |
6388 | ||
6389 | xfs_defer_init(&dfops, &firstfsb); | |
6390 | ||
6391 | error = xfs_bmap_split_extent_at(tp, ip, split_fsb, | |
6392 | &firstfsb, &dfops); | |
6393 | if (error) | |
6394 | goto out; | |
6395 | ||
6396 | error = xfs_defer_finish(&tp, &dfops); | |
6397 | if (error) | |
6398 | goto out; | |
6399 | ||
6400 | return xfs_trans_commit(tp); | |
6401 | ||
6402 | out: | |
6403 | xfs_defer_cancel(&dfops); | |
6404 | xfs_trans_cancel(tp); | |
6405 | return error; | |
6406 | } | |
6407 | ||
6408 | /* Deferred mapping is only for real extents in the data fork. */ | |
6409 | static bool | |
6410 | xfs_bmap_is_update_needed( | |
6411 | struct xfs_bmbt_irec *bmap) | |
6412 | { | |
6413 | return bmap->br_startblock != HOLESTARTBLOCK && | |
6414 | bmap->br_startblock != DELAYSTARTBLOCK; | |
6415 | } | |
6416 | ||
6417 | /* Record a bmap intent. */ | |
6418 | static int | |
6419 | __xfs_bmap_add( | |
6420 | struct xfs_mount *mp, | |
6421 | struct xfs_defer_ops *dfops, | |
6422 | enum xfs_bmap_intent_type type, | |
6423 | struct xfs_inode *ip, | |
6424 | int whichfork, | |
6425 | struct xfs_bmbt_irec *bmap) | |
6426 | { | |
6427 | int error; | |
6428 | struct xfs_bmap_intent *bi; | |
6429 | ||
6430 | trace_xfs_bmap_defer(mp, | |
6431 | XFS_FSB_TO_AGNO(mp, bmap->br_startblock), | |
6432 | type, | |
6433 | XFS_FSB_TO_AGBNO(mp, bmap->br_startblock), | |
6434 | ip->i_ino, whichfork, | |
6435 | bmap->br_startoff, | |
6436 | bmap->br_blockcount, | |
6437 | bmap->br_state); | |
6438 | ||
6439 | bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS); | |
6440 | INIT_LIST_HEAD(&bi->bi_list); | |
6441 | bi->bi_type = type; | |
6442 | bi->bi_owner = ip; | |
6443 | bi->bi_whichfork = whichfork; | |
6444 | bi->bi_bmap = *bmap; | |
6445 | ||
6446 | error = xfs_defer_ijoin(dfops, bi->bi_owner); | |
6447 | if (error) { | |
6448 | kmem_free(bi); | |
6449 | return error; | |
6450 | } | |
6451 | ||
6452 | xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list); | |
6453 | return 0; | |
6454 | } | |
6455 | ||
6456 | /* Map an extent into a file. */ | |
6457 | int | |
6458 | xfs_bmap_map_extent( | |
6459 | struct xfs_mount *mp, | |
6460 | struct xfs_defer_ops *dfops, | |
6461 | struct xfs_inode *ip, | |
6462 | struct xfs_bmbt_irec *PREV) | |
6463 | { | |
6464 | if (!xfs_bmap_is_update_needed(PREV)) | |
6465 | return 0; | |
6466 | ||
6467 | return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip, | |
6468 | XFS_DATA_FORK, PREV); | |
6469 | } | |
6470 | ||
6471 | /* Unmap an extent out of a file. */ | |
6472 | int | |
6473 | xfs_bmap_unmap_extent( | |
6474 | struct xfs_mount *mp, | |
6475 | struct xfs_defer_ops *dfops, | |
6476 | struct xfs_inode *ip, | |
6477 | struct xfs_bmbt_irec *PREV) | |
6478 | { | |
6479 | if (!xfs_bmap_is_update_needed(PREV)) | |
6480 | return 0; | |
6481 | ||
6482 | return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip, | |
6483 | XFS_DATA_FORK, PREV); | |
6484 | } | |
6485 | ||
6486 | /* | |
6487 | * Process one of the deferred bmap operations. We pass back the | |
6488 | * btree cursor to maintain our lock on the bmapbt between calls. | |
6489 | */ | |
6490 | int | |
6491 | xfs_bmap_finish_one( | |
6492 | struct xfs_trans *tp, | |
6493 | struct xfs_defer_ops *dfops, | |
6494 | struct xfs_inode *ip, | |
6495 | enum xfs_bmap_intent_type type, | |
6496 | int whichfork, | |
6497 | xfs_fileoff_t startoff, | |
6498 | xfs_fsblock_t startblock, | |
6499 | xfs_filblks_t *blockcount, | |
6500 | xfs_exntst_t state) | |
6501 | { | |
6502 | xfs_fsblock_t firstfsb; | |
6503 | int error = 0; | |
6504 | ||
6505 | /* | |
6506 | * firstfsb is tied to the transaction lifetime and is used to | |
6507 | * ensure correct AG locking order and schedule work item | |
6508 | * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us | |
6509 | * to only making one bmap call per transaction, so it should | |
6510 | * be safe to have it as a local variable here. | |
6511 | */ | |
6512 | firstfsb = NULLFSBLOCK; | |
6513 | ||
6514 | trace_xfs_bmap_deferred(tp->t_mountp, | |
6515 | XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, | |
6516 | XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), | |
6517 | ip->i_ino, whichfork, startoff, *blockcount, state); | |
6518 | ||
6519 | if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK)) | |
6520 | return -EFSCORRUPTED; | |
6521 | ||
6522 | if (XFS_TEST_ERROR(false, tp->t_mountp, | |
6523 | XFS_ERRTAG_BMAP_FINISH_ONE)) | |
6524 | return -EIO; | |
6525 | ||
6526 | switch (type) { | |
6527 | case XFS_BMAP_MAP: | |
6528 | error = xfs_bmapi_remap(tp, ip, startoff, *blockcount, | |
6529 | startblock, dfops); | |
6530 | *blockcount = 0; | |
6531 | break; | |
6532 | case XFS_BMAP_UNMAP: | |
6533 | error = __xfs_bunmapi(tp, ip, startoff, blockcount, | |
6534 | XFS_BMAPI_REMAP, 1, &firstfsb, dfops); | |
6535 | break; | |
6536 | default: | |
6537 | ASSERT(0); | |
6538 | error = -EFSCORRUPTED; | |
6539 | } | |
6540 | ||
6541 | return error; | |
6542 | } |