/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_inode.h"

/*
 * Lookup the first record less than or equal to [bno, len, owner, offset]
 * in the btree given by cur.
 */
int
xfs_rmap_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	int			*stat)
{
	cur->bc_rec.r.rm_startblock = bno;
	cur->bc_rec.r.rm_blockcount = len;
	cur->bc_rec.r.rm_owner = owner;
	cur->bc_rec.r.rm_offset = offset;
	cur->bc_rec.r.rm_flags = flags;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Lookup the record exactly matching [bno, len, owner, offset]
 * in the btree given by cur.
 */
int
xfs_rmap_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	int			*stat)
{
	cur->bc_rec.r.rm_startblock = bno;
	cur->bc_rec.r.rm_blockcount = len;
	cur->bc_rec.r.rm_owner = owner;
	cur->bc_rec.r.rm_offset = offset;
	cur->bc_rec.r.rm_flags = flags;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len, owner, offset].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_rmap_update(
	struct xfs_btree_cur	*cur,
	struct xfs_rmap_irec	*irec)
{
	union xfs_btree_rec	rec;
	int			error;

	trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
			irec->rm_startblock, irec->rm_blockcount,
			irec->rm_owner, irec->rm_offset, irec->rm_flags);

	rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
	rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
	rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
	rec.rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(irec));
	error = xfs_btree_update(cur, &rec);
	if (error)
		trace_xfs_rmap_update_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

int
xfs_rmap_insert(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags)
{
	int			i;
	int			error;

	trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
			len, owner, offset, flags);

	error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);

	rcur->bc_rec.r.rm_startblock = agbno;
	rcur->bc_rec.r.rm_blockcount = len;
	rcur->bc_rec.r.rm_owner = owner;
	rcur->bc_rec.r.rm_offset = offset;
	rcur->bc_rec.r.rm_flags = flags;
	error = xfs_btree_insert(rcur, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
done:
	if (error)
		trace_xfs_rmap_insert_error(rcur->bc_mp,
				rcur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

static int
xfs_rmap_btrec_to_irec(
	union xfs_btree_rec	*rec,
	struct xfs_rmap_irec	*irec)
{
	irec->rm_flags = 0;
	irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
	irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
	irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
	return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
			irec);
}

/*
 * Get the data from the pointed-to record.
 */
int
xfs_rmap_get_rec(
	struct xfs_btree_cur	*cur,
	struct xfs_rmap_irec	*irec,
	int			*stat)
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !*stat)
		return error;

	return xfs_rmap_btrec_to_irec(rec, irec);
}

/*
 * Find the extent in the rmap btree and remove it.
 *
 * The record we find should always be an exact match for the extent that we're
 * looking for, since we insert them into the btree without modification.
 *
 * Special Case #1: when growing the filesystem, we "free" an extent when
 * growing the last AG. This extent is new space and so it is not tracked as
 * used space in the btree. The growfs code will pass in an owner of
 * XFS_RMAP_OWN_NULL to indicate that it expects there to be no owner of this
 * extent. We verify this by checking that the extent lookup results in a
 * record that does not overlap.
 *
 * Special Case #2: EFIs do not record the owner of the extent, so when
 * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
 * btree to ignore the owner (i.e. wildcard match) so we don't trigger
 * corruption checks during log recovery.
 */
STATIC int
xfs_rmap_unmap(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	ltrec;
	uint64_t		ltoff;
	int			error = 0;
	int			i;
	uint64_t		owner;
	uint64_t		offset;
	unsigned int		flags;
	bool			ignore_off;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & XFS_RMAP_BMBT_BLOCK);
	if (unwritten)
		flags |= XFS_RMAP_UNWRITTEN;
	trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * We should always have a left record because there's a static record
	 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
	 * will not ever be removed from the tree.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);

	error = xfs_rmap_get_rec(cur, &ltrec, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, ltrec.rm_startblock,
			ltrec.rm_blockcount, ltrec.rm_owner,
			ltrec.rm_offset, ltrec.rm_flags);
	ltoff = ltrec.rm_offset;

	/*
	 * For growfs, the incoming extent must be beyond the left record we
	 * just found as it is new space and won't be used by anyone. This is
	 * just a corruption check as we don't actually do anything with this
	 * extent. Note that we need to use >= instead of > because it might
	 * be the case that the "left" extent goes all the way to EOFS.
	 */
	if (owner == XFS_RMAP_OWN_NULL) {
		XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
						ltrec.rm_blockcount, out_error);
		goto out_done;
	}

	/* Make sure the unwritten flag matches. */
	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
			(ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);

	/* Make sure the extent we found covers the entire freeing range. */
	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
			ltrec.rm_startblock + ltrec.rm_blockcount >=
			bno + len, out_error);

	/* Make sure the owner matches what we expect to find in the tree. */
	XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
			XFS_RMAP_NON_INODE_OWNER(owner), out_error);

	/* Check the offset, if necessary. */
	if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
		if (flags & XFS_RMAP_BMBT_BLOCK) {
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
					out_error);
		} else {
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltrec.rm_offset <= offset, out_error);
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltoff + ltrec.rm_blockcount >= offset + len,
					out_error);
		}
	}

	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
		/* exact match, simply remove the record from rmap tree */
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				ltrec.rm_startblock, ltrec.rm_blockcount,
				ltrec.rm_owner, ltrec.rm_offset,
				ltrec.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	} else if (ltrec.rm_startblock == bno) {
		/*
		 * overlap left hand side of extent: move the start, trim the
		 * length and update the current record.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing: |fffffffff|
		 * Result:            |rrrrrrrrrr|
		 *         bno       len
		 */
		ltrec.rm_startblock += len;
		ltrec.rm_blockcount -= len;
		if (!ignore_off)
			ltrec.rm_offset += len;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
		/*
		 * overlap right hand side of extent: trim the length and update
		 * the current record.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing:            |fffffffff|
		 * Result:  |rrrrrrrrrr|
		 *                    bno       len
		 */
		ltrec.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else {

		/*
		 * overlap middle of extent: trim the length of the existing
		 * record to the length of the new left extent, then increment
		 * the insertion position so we can insert a new record
		 * containing the remaining right-extent space.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing:       |fffffffff|
		 * Result:  |rrrrr|         |rrrr|
		 *               bno       len
		 */
		xfs_extlen_t	orig_len = ltrec.rm_blockcount;

		ltrec.rm_blockcount = bno - ltrec.rm_startblock;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;

		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto out_error;

		cur->bc_rec.r.rm_startblock = bno + len;
		cur->bc_rec.r.rm_blockcount = orig_len - len -
						ltrec.rm_blockcount;
		cur->bc_rec.r.rm_owner = ltrec.rm_owner;
		if (ignore_off)
			cur->bc_rec.r.rm_offset = 0;
		else
			cur->bc_rec.r.rm_offset = offset + len;
		cur->bc_rec.r.rm_flags = flags;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
				cur->bc_rec.r.rm_startblock,
				cur->bc_rec.r.rm_blockcount,
				cur->bc_rec.r.rm_owner,
				cur->bc_rec.r.rm_offset,
				cur->bc_rec.r.rm_flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto out_error;
	}

out_done:
	trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
out_error:
	if (error)
		trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
				error, _RET_IP_);
	return error;
}

/*
 * Remove a reference to an extent in the rmap btree.
 */
int
xfs_rmap_free(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);

	error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
	if (error)
		goto out_error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

out_error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * A mergeable rmap must have the same owner and the same values for
 * the unwritten, attr_fork, and bmbt flags. The startblock and
 * offset are checked separately.
 */
static bool
xfs_rmap_is_mergeable(
	struct xfs_rmap_irec	*irec,
	uint64_t		owner,
	unsigned int		flags)
{
	if (irec->rm_owner == XFS_RMAP_OWN_NULL)
		return false;
	if (irec->rm_owner != owner)
		return false;
	if ((flags & XFS_RMAP_UNWRITTEN) ^
	    (irec->rm_flags & XFS_RMAP_UNWRITTEN))
		return false;
	if ((flags & XFS_RMAP_ATTR_FORK) ^
	    (irec->rm_flags & XFS_RMAP_ATTR_FORK))
		return false;
	if ((flags & XFS_RMAP_BMBT_BLOCK) ^
	    (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
		return false;
	return true;
}

/*
 * When we allocate a new block, the first thing we do is add a reference to
 * the extent in the rmap btree. This takes the form of an [agbno, length,
 * owner, offset] record. Flags are encoded in the high bits of the offset
 * field.
 */
STATIC int
xfs_rmap_map(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	ltrec;
	struct xfs_rmap_irec	gtrec;
	int			have_gt;
	int			have_lt;
	int			error = 0;
	int			i;
	uint64_t		owner;
	uint64_t		offset;
	unsigned int		flags = 0;
	bool			ignore_off;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ASSERT(owner != 0);
	ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & XFS_RMAP_BMBT_BLOCK);
	if (unwritten)
		flags |= XFS_RMAP_UNWRITTEN;
	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * For the initial lookup, look for an exact match or the left-adjacent
	 * record for our insertion point. This will also give us the record for
	 * start block contiguity tests.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
			&have_lt);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);

	error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, ltrec.rm_startblock,
			ltrec.rm_blockcount, ltrec.rm_owner,
			ltrec.rm_offset, ltrec.rm_flags);

	if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
		have_lt = 0;

	XFS_WANT_CORRUPTED_GOTO(mp,
			have_lt == 0 ||
			ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);

	/*
	 * Increment the cursor to see if we have a right-adjacent record to our
	 * insertion point. This will give us the record for end block
	 * contiguity tests.
	 */
	error = xfs_btree_increment(cur, 0, &have_gt);
	if (error)
		goto out_error;
	if (have_gt) {
		error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
		XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
				out_error);
		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
				cur->bc_private.a.agno, gtrec.rm_startblock,
				gtrec.rm_blockcount, gtrec.rm_owner,
				gtrec.rm_offset, gtrec.rm_flags);
		if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
			have_gt = 0;
	}

	/*
	 * Note: cursor currently points one record to the right of ltrec, even
	 * if there is no record in the tree to the right.
	 */
	if (have_lt &&
	    ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
	    (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
		/*
		 * left edge contiguous, merge into left record.
		 *
		 *       ltbno     ltlen
		 * orig:   |ooooooooo|
		 * adding:           |aaaaaaaaa|
		 * result: |rrrrrrrrrrrrrrrrrrr|
		 *                  bno       len
		 */
		ltrec.rm_blockcount += len;
		if (have_gt &&
		    bno + len == gtrec.rm_startblock &&
		    (ignore_off || offset + len == gtrec.rm_offset) &&
		    (unsigned long)ltrec.rm_blockcount + len +
				gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
			/*
			 * right edge also contiguous, delete right record
			 * and merge into left record.
			 *
			 *       ltbno     ltlen    gtbno     gtlen
			 * orig:   |ooooooooo|         |ooooooooo|
			 * adding:           |aaaaaaaaa|
			 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
			 */
			ltrec.rm_blockcount += gtrec.rm_blockcount;
			trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
					gtrec.rm_startblock,
					gtrec.rm_blockcount,
					gtrec.rm_owner,
					gtrec.rm_offset,
					gtrec.rm_flags);
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto out_error;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
		}

		/* point the cursor back to the left record and update */
		error = xfs_btree_decrement(cur, 0, &have_gt);
		if (error)
			goto out_error;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else if (have_gt &&
		   bno + len == gtrec.rm_startblock &&
		   (ignore_off || offset + len == gtrec.rm_offset)) {
		/*
		 * right edge contiguous, merge into right record.
		 *
		 *                 gtbno     gtlen
		 * Orig:             |ooooooooo|
		 * adding: |aaaaaaaaa|
		 * Result: |rrrrrrrrrrrrrrrrrrr|
		 *        bno       len
		 */
		gtrec.rm_startblock = bno;
		gtrec.rm_blockcount += len;
		if (!ignore_off)
			gtrec.rm_offset = offset;
		error = xfs_rmap_update(cur, &gtrec);
		if (error)
			goto out_error;
	} else {
		/*
		 * no contiguous edge with identical owner, insert
		 * new record at current cursor position.
		 */
		cur->bc_rec.r.rm_startblock = bno;
		cur->bc_rec.r.rm_blockcount = len;
		cur->bc_rec.r.rm_owner = owner;
		cur->bc_rec.r.rm_offset = offset;
		cur->bc_rec.r.rm_flags = flags;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
				owner, offset, flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	}

	trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
out_error:
	if (error)
		trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
				error, _RET_IP_);
	return error;
}

/*
 * Add a reference to an extent in the rmap btree.
 */
int
xfs_rmap_alloc(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
	error = xfs_rmap_map(cur, bno, len, false, oinfo);
	if (error)
		goto out_error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

out_error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

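/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the extent allocator would add a reverse mapping for a freshly allocated
 * extent roughly like this, where "args" stands in for an allocation
 * argument structure carrying the transaction, AGF buffer, AG geometry and
 * owner info of the allocation:
 *
 *	error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
 *			args->agbno, args->len, &args->oinfo);
 *	if (error)
 *		return error;
 *
 * xfs_rmap_free() is the mirror-image call made when the extent is freed.
 */
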
#define RMAP_LEFT_CONTIG	(1 << 0)
#define RMAP_RIGHT_CONTIG	(1 << 1)
#define RMAP_LEFT_FILLING	(1 << 2)
#define RMAP_RIGHT_FILLING	(1 << 3)
#define RMAP_LEFT_VALID		(1 << 6)
#define RMAP_RIGHT_VALID	(1 << 7)

#define LEFT	r[0]
#define RIGHT	r[1]
#define PREV	r[2]
#define NEW	r[3]

/*
 * Convert an unwritten extent to a real extent or vice versa.
 * Does not handle overlapping extents.
 */
STATIC int
xfs_rmap_convert(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	r[4];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
					/* new is 3 */
	uint64_t		owner;
	uint64_t		offset;
	uint64_t		new_endoff;
	unsigned int		oldext;
	unsigned int		newext;
	unsigned int		flags = 0;
	int			i;
	int			state = 0;
	int			error;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
	oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
	new_endoff = offset + len;
	trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * For the initial lookup, look for an exact match or the left-adjacent
	 * record for our insertion point. This will also give us the record for
	 * start block contiguity tests.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);

	error = xfs_rmap_get_rec(cur, &PREV, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, PREV.rm_startblock,
			PREV.rm_blockcount, PREV.rm_owner,
			PREV.rm_offset, PREV.rm_flags);

	ASSERT(PREV.rm_offset <= offset);
	ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
	ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
	newext = ~oldext & XFS_RMAP_UNWRITTEN;

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.rm_offset == offset)
		state |= RMAP_LEFT_FILLING;
	if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
		state |= RMAP_RIGHT_FILLING;

	/*
	 * Decrement the cursor to see if we have a left-adjacent record to our
	 * insertion point. This will give us the record for start block
	 * contiguity tests.
	 */
	error = xfs_btree_decrement(cur, 0, &i);
	if (error)
		goto done;
	if (i) {
		state |= RMAP_LEFT_VALID;
		error = xfs_rmap_get_rec(cur, &LEFT, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		XFS_WANT_CORRUPTED_GOTO(mp,
				LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
				done);
		trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
				cur->bc_private.a.agno, LEFT.rm_startblock,
				LEFT.rm_blockcount, LEFT.rm_owner,
				LEFT.rm_offset, LEFT.rm_flags);
		if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
		    LEFT.rm_offset + LEFT.rm_blockcount == offset &&
		    xfs_rmap_is_mergeable(&LEFT, owner, newext))
			state |= RMAP_LEFT_CONTIG;
	}

	/*
	 * Increment the cursor to see if we have a right-adjacent record to our
	 * insertion point. This will give us the record for end block
	 * contiguity tests.
	 */
	error = xfs_btree_increment(cur, 0, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
	error = xfs_btree_increment(cur, 0, &i);
	if (error)
		goto done;
	if (i) {
		state |= RMAP_RIGHT_VALID;
		error = xfs_rmap_get_rec(cur, &RIGHT, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
				done);
		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
				cur->bc_private.a.agno, RIGHT.rm_startblock,
				RIGHT.rm_blockcount, RIGHT.rm_owner,
				RIGHT.rm_offset, RIGHT.rm_flags);
		if (bno + len == RIGHT.rm_startblock &&
		    offset + len == RIGHT.rm_offset &&
		    xfs_rmap_is_mergeable(&RIGHT, owner, newext))
			state |= RMAP_RIGHT_CONTIG;
	}

	/* check that left + prev + right is not too long */
	if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
			(RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
	    (unsigned long)LEFT.rm_blockcount + len +
			RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
		state &= ~RMAP_RIGHT_CONTIG;

	trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
			_RET_IP_);

	/* reset the cursor back to PREV */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
	     RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				RIGHT.rm_startblock, RIGHT.rm_blockcount,
				RIGHT.rm_owner, RIGHT.rm_offset,
				RIGHT.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				PREV.rm_startblock, PREV.rm_blockcount,
				PREV.rm_owner, PREV.rm_offset,
				PREV.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = LEFT;
		NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				PREV.rm_startblock, PREV.rm_blockcount,
				PREV.rm_owner, PREV.rm_offset,
				PREV.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = LEFT;
		NEW.rm_blockcount += PREV.rm_blockcount;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				RIGHT.rm_startblock, RIGHT.rm_blockcount,
				RIGHT.rm_owner, RIGHT.rm_offset,
				RIGHT.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = PREV;
		NEW.rm_blockcount = len + RIGHT.rm_blockcount;
		NEW.rm_flags = newext;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		NEW = PREV;
		NEW.rm_flags = newext;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		NEW = PREV;
		NEW.rm_offset += len;
		NEW.rm_startblock += len;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		NEW = LEFT;
		NEW.rm_blockcount += len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		NEW = PREV;
		NEW.rm_startblock += len;
		NEW.rm_offset += len;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		NEW.rm_startblock = bno;
		NEW.rm_owner = owner;
		NEW.rm_offset = offset;
		NEW.rm_blockcount = len;
		NEW.rm_flags = newext;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
				len, owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		NEW = PREV;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		NEW = RIGHT;
		NEW.rm_offset = offset;
		NEW.rm_startblock = bno;
		NEW.rm_blockcount += len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		NEW = PREV;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
				oldext, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
		NEW.rm_startblock = bno;
		NEW.rm_owner = owner;
		NEW.rm_offset = offset;
		NEW.rm_blockcount = len;
		NEW.rm_flags = newext;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
				len, owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext. Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		/* new right extent - oldext */
		NEW.rm_startblock = bno + len;
		NEW.rm_owner = owner;
		NEW.rm_offset = new_endoff;
		NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
				new_endoff;
		NEW.rm_flags = PREV.rm_flags;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		/* new left extent - oldext */
		NEW = PREV;
		NEW.rm_blockcount = offset - PREV.rm_offset;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
				NEW.rm_startblock, NEW.rm_blockcount,
				NEW.rm_owner, NEW.rm_offset,
				NEW.rm_flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		/*
		 * Reset the cursor to the position of the new extent
		 * we are about to insert as we can't trust it after
		 * the previous insert.
		 */
		error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
				oldext, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
		/* new middle extent - newext */
		cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
		cur->bc_rec.r.rm_flags |= newext;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
				owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
	case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_LEFT_CONTIG:
	case RMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
done:
	if (error)
		trace_xfs_rmap_convert_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

#undef NEW
#undef LEFT
#undef RIGHT
#undef PREV

struct xfs_rmap_query_range_info {
	xfs_rmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_rmap_query_range_helper(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	struct xfs_rmap_query_range_info	*query = priv;
	struct xfs_rmap_irec			irec;
	int					error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (error)
		return error;
	return query->fn(cur, &irec, query->priv);
}

/* Find all rmaps between two keys. */
int
xfs_rmap_query_range(
	struct xfs_btree_cur	*cur,
	struct xfs_rmap_irec	*low_rec,
	struct xfs_rmap_irec	*high_rec,
	xfs_rmap_query_range_fn	fn,
	void			*priv)
{
	union xfs_btree_irec	low_brec;
	union xfs_btree_irec	high_brec;
	struct xfs_rmap_query_range_info	query;

	low_brec.r = *low_rec;
	high_brec.r = *high_rec;
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_rmap_query_range_helper, &query);
}

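/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * every rmap record in an AG can be visited by querying the full key space
 * with a callback matching xfs_rmap_query_range_fn. The helper name and the
 * surrounding setup below are assumptions for illustration only:
 *
 *	STATIC int
 *	xfs_rmap_count_helper(
 *		struct xfs_btree_cur	*cur,
 *		struct xfs_rmap_irec	*rec,
 *		void			*priv)
 *	{
 *		(*(uint64_t *)priv)++;
 *		return 0;
 *	}
 *
 *	struct xfs_rmap_irec	low;
 *	struct xfs_rmap_irec	high;
 *	uint64_t		nrecs = 0;
 *
 *	memset(&low, 0, sizeof(low));
 *	memset(&high, 0xFF, sizeof(high));
 *	error = xfs_rmap_query_range(cur, &low, &high,
 *			xfs_rmap_count_helper, &nrecs);
 */
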
/* Clean up after calling xfs_rmap_finish_one. */
void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_private.a.agbp;
	xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	if (error)
		xfs_trans_brelse(tp, agbp);
}

/*
 * Process one of the deferred rmap operations. We pass back the
 * btree cursor to maintain our lock on the rmapbt between calls.
 * This saves time and eliminates a buffer deadlock between the
 * superblock and the AGF because we'll always grab them in the same
 * order.
 */
int
xfs_rmap_finish_one(
	struct xfs_trans		*tp,
	enum xfs_rmap_intent_type	type,
	__uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agnumber_t			agno;
	struct xfs_owner_info		oinfo;
	xfs_agblock_t			bno;
	bool				unwritten;

	agno = XFS_FSB_TO_AGNO(mp, startblock);
	ASSERT(agno != NULLAGNUMBER);
	bno = XFS_FSB_TO_AGBNO(mp, startblock);

	trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
			startoff, blockcount, state);

	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_RMAP_FINISH_ONE,
			XFS_RANDOM_RMAP_FINISH_ONE))
		return -EIO;

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		/*
		 * Refresh the freelist before we start changing the
		 * rmapbt, because a shape change could cause us to
		 * allocate blocks.
		 */
		error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
		if (error)
			return error;
		if (!agbp)
			return -EFSCORRUPTED;

		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
		if (!rcur) {
			error = -ENOMEM;
			goto out_cur;
		}
	}
	*pcur = rcur;

	xfs_rmap_ino_owner(&oinfo, owner, whichfork, startoff);
	unwritten = state == XFS_EXT_UNWRITTEN;
	bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, startblock);

	switch (type) {
	case XFS_RMAP_ALLOC:
	case XFS_RMAP_MAP:
		error = xfs_rmap_map(rcur, bno, blockcount, unwritten, &oinfo);
		break;
	case XFS_RMAP_FREE:
	case XFS_RMAP_UNMAP:
		error = xfs_rmap_unmap(rcur, bno, blockcount, unwritten,
				&oinfo);
		break;
	case XFS_RMAP_CONVERT:
		error = xfs_rmap_convert(rcur, bno, blockcount, !unwritten,
				&oinfo);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
	return error;

out_cur:
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Don't defer an rmap if we aren't an rmap filesystem.
 */
static bool
xfs_rmap_update_is_needed(
	struct xfs_mount	*mp)
{
	return xfs_sb_version_hasrmapbt(&mp->m_sb);
}

/*
 * Record a rmap intent; the list is kept sorted first by AG and then by
 * increasing age.
 */
static int
__xfs_rmap_add(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	enum xfs_rmap_intent_type	type,
	__uint64_t			owner,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_rmap_intent		*ri;

	trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
			owner, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&ri->ri_list);
	ri->ri_type = type;
	ri->ri_owner = owner;
	ri->ri_whichfork = whichfork;
	ri->ri_bmap = *bmap;

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_rmap_map_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_MAP, ip->i_ino,
			whichfork, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_rmap_unmap_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_UNMAP, ip->i_ino,
			whichfork, PREV);
}

/* Convert a data fork extent from unwritten to real or vice versa. */
int
xfs_rmap_convert_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_CONVERT, ip->i_ino,
			whichfork, PREV);
}

/* Schedule the creation of an rmap for non-file data. */
int
xfs_rmap_alloc_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	__uint64_t		owner)
{
	struct xfs_bmbt_irec	bmap;

	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
	bmap.br_blockcount = len;
	bmap.br_startoff = 0;
	bmap.br_state = XFS_EXT_NORM;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
			XFS_DATA_FORK, &bmap);
}

/* Schedule the deletion of an rmap for non-file data. */
int
xfs_rmap_free_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	__uint64_t		owner)
{
	struct xfs_bmbt_irec	bmap;

	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
	bmap.br_blockcount = len;
	bmap.br_startoff = 0;
	bmap.br_state = XFS_EXT_NORM;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
			XFS_DATA_FORK, &bmap);
}
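
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * code that changes a file's block mapping typically schedules the matching
 * deferred rmap update right after modifying the extent map, e.g.:
 *
 *	error = xfs_rmap_map_extent(mp, dfops, ip, XFS_DATA_FORK, &imap);
 *	if (error)
 *		return error;
 *
 * where "imap" is the xfs_bmbt_irec that was just mapped and "dfops" is the
 * caller's deferred operations list; the recorded intent is applied later
 * through xfs_rmap_finish_one() when the deferred ops are processed.
 */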