1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "libxfs_priv.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_btree.h"
17 #include "xfs_rmap.h"
18 #include "xfs_alloc_btree.h"
19 #include "xfs_alloc.h"
20 #include "xfs_trace.h"
21 #include "xfs_trans.h"
22 #include "xfs_ag_resv.h"
23 #include "xfs_bmap.h"
24
25 extern kmem_zone_t *xfs_bmap_free_item_zone;
26
27 struct workqueue_struct *xfs_alloc_wq;
28
29 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
30
31 #define XFSA_FIXUP_BNO_OK 1
32 #define XFSA_FIXUP_CNT_OK 2
33
34 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
35 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
36 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
37
38 /*
39 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots at
40 * the beginning of the block for a proper header with the location information
41 * and CRC.
42 */
43 unsigned int
44 xfs_agfl_size(
45 struct xfs_mount *mp)
46 {
47 unsigned int size = mp->m_sb.sb_sectsize;
48
49 if (xfs_sb_version_hascrc(&mp->m_sb))
50 size -= sizeof(struct xfs_agfl);
51
52 return size / sizeof(xfs_agblock_t);
53 }
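/*
 * A worked example, assuming the usual on-disk sizes (4-byte
 * xfs_agblock_t entries and a 36-byte struct xfs_agfl header): with
 * 512-byte sectors, a v5 (CRC) filesystem gets (512 - 36) / 4 = 119
 * AGFL slots, while a v4 filesystem uses the whole sector for
 * 512 / 4 = 128 slots.
 */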
54
55 unsigned int
56 xfs_refc_block(
57 struct xfs_mount *mp)
58 {
59 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
60 return XFS_RMAP_BLOCK(mp) + 1;
61 if (xfs_sb_version_hasfinobt(&mp->m_sb))
62 return XFS_FIBT_BLOCK(mp) + 1;
63 return XFS_IBT_BLOCK(mp) + 1;
64 }
65
66 xfs_extlen_t
67 xfs_prealloc_blocks(
68 struct xfs_mount *mp)
69 {
70 if (xfs_sb_version_hasreflink(&mp->m_sb))
71 return xfs_refc_block(mp) + 1;
72 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
73 return XFS_RMAP_BLOCK(mp) + 1;
74 if (xfs_sb_version_hasfinobt(&mp->m_sb))
75 return XFS_FIBT_BLOCK(mp) + 1;
76 return XFS_IBT_BLOCK(mp) + 1;
77 }
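/*
 * Note: the two helpers above encode the fixed per-AG metadata layout,
 * where the btree root blocks sit consecutively after the AG headers:
 * bnobt, cntbt, inobt, then (as features are enabled) finobt, rmapbt
 * and refcountbt. xfs_refc_block() returns the refcount btree root's
 * block, and xfs_prealloc_blocks() the first block past all the roots.
 */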
78
79 /*
80 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
81 * AGF buffer (PV 947395), we place constraints on the relationship among
82 * actual allocations for data blocks, freelist blocks, and potential file data
83 * bmap btree blocks. However, these restrictions may result in no actual space
84 * allocated for a delayed extent, for example, a data block in a certain AG is
85 * allocated but there is no additional block for the additional bmap btree
86 * block due to a split of the bmap btree of the file. The result of this may
87 * lead to an infinite loop when the file gets flushed to disk and all delayed
88 * extents need to be actually allocated. To get around this, we explicitly set
89 * aside a few blocks which will not be reserved in delayed allocation.
90 *
91 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
92 * potential split of the file's bmap btree.
93 */
94 unsigned int
95 xfs_alloc_set_aside(
96 struct xfs_mount *mp)
97 {
98 return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
99 }
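/*
 * For example (XFS_ALLOC_AGFL_RESERVE is 4, matching the "4 fsbs per
 * AG" comment above): a filesystem with 16 AGs sets aside
 * 16 * (4 + 4) = 128 blocks that delayed allocation can never reserve.
 */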
100
101 /*
102 * When deciding how much space to allocate out of an AG, we limit the
103 * maximum allocation size to the size of the AG. However, we cannot use all the
104 * blocks in the AG - some are permanently used by metadata. These
105 * blocks are generally:
106 * - the AG superblock, AGF, AGI and AGFL
107 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
108 * the AGI free inode and rmap btree root blocks.
109 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
110 * - the rmapbt root block
111 *
112 * The AG headers are sector sized, so the amount of space they take up is
113 * dependent on filesystem geometry. The others are all single blocks.
114 */
115 unsigned int
116 xfs_alloc_ag_max_usable(
117 struct xfs_mount *mp)
118 {
119 unsigned int blocks;
120
121 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
122 blocks += XFS_ALLOC_AGFL_RESERVE;
123 blocks += 3; /* AGF, AGI btree root blocks */
124 if (xfs_sb_version_hasfinobt(&mp->m_sb))
125 blocks++; /* finobt root block */
126 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
127 blocks++; /* rmap root block */
128 if (xfs_sb_version_hasreflink(&mp->m_sb))
129 blocks++; /* refcount root block */
130
131 return mp->m_sb.sb_agblocks - blocks;
132 }
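/*
 * Example, assuming 4096-byte blocks and 512-byte sectors: the four
 * sector-sized headers fit in a single fsblock, so the deduction is
 * 1 (headers) + XFS_ALLOC_AGFL_RESERVE + 3 (bnobt/cntbt/inobt roots),
 * plus one block each for the finobt, rmapbt and refcountbt roots when
 * those features are enabled, i.e. 11 blocks per AG in total.
 */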
133
134 /*
135 * Lookup the record equal to [bno, len] in the btree given by cur.
136 */
137 STATIC int /* error */
138 xfs_alloc_lookup_eq(
139 struct xfs_btree_cur *cur, /* btree cursor */
140 xfs_agblock_t bno, /* starting block of extent */
141 xfs_extlen_t len, /* length of extent */
142 int *stat) /* success/failure */
143 {
144 int error;
145
146 cur->bc_rec.a.ar_startblock = bno;
147 cur->bc_rec.a.ar_blockcount = len;
148 error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
149 cur->bc_private.a.priv.abt.active = (*stat == 1);
150 return error;
151 }
152
153 /*
154 * Lookup the first record greater than or equal to [bno, len]
155 * in the btree given by cur.
156 */
157 int /* error */
158 xfs_alloc_lookup_ge(
159 struct xfs_btree_cur *cur, /* btree cursor */
160 xfs_agblock_t bno, /* starting block of extent */
161 xfs_extlen_t len, /* length of extent */
162 int *stat) /* success/failure */
163 {
164 int error;
165
166 cur->bc_rec.a.ar_startblock = bno;
167 cur->bc_rec.a.ar_blockcount = len;
168 error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
169 cur->bc_private.a.priv.abt.active = (*stat == 1);
170 return error;
171 }
172
173 /*
174 * Lookup the first record less than or equal to [bno, len]
175 * in the btree given by cur.
176 */
177 int /* error */
178 xfs_alloc_lookup_le(
179 struct xfs_btree_cur *cur, /* btree cursor */
180 xfs_agblock_t bno, /* starting block of extent */
181 xfs_extlen_t len, /* length of extent */
182 int *stat) /* success/failure */
183 {
184 int error;
185 cur->bc_rec.a.ar_startblock = bno;
186 cur->bc_rec.a.ar_blockcount = len;
187 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
188 cur->bc_private.a.priv.abt.active = (*stat == 1);
189 return error;
190 }
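/*
 * All three lookup variants above stash [bno, len] in the cursor as the
 * search key, ask the generic btree code for an EQ/GE/LE match, and
 * record in the private "active" flag whether the cursor now points at
 * a usable record. The near-mode allocator below uses that flag to
 * retire cursors that have walked out of range.
 */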
191
192 static inline bool
193 xfs_alloc_cur_active(
194 struct xfs_btree_cur *cur)
195 {
196 return cur && cur->bc_private.a.priv.abt.active;
197 }
198
199 /*
200 * Update the record referred to by cur to the value given
201 * by [bno, len].
202 * This either works (return 0) or gets an EFSCORRUPTED error.
203 */
204 STATIC int /* error */
205 xfs_alloc_update(
206 struct xfs_btree_cur *cur, /* btree cursor */
207 xfs_agblock_t bno, /* starting block of extent */
208 xfs_extlen_t len) /* length of extent */
209 {
210 union xfs_btree_rec rec;
211
212 rec.alloc.ar_startblock = cpu_to_be32(bno);
213 rec.alloc.ar_blockcount = cpu_to_be32(len);
214 return xfs_btree_update(cur, &rec);
215 }
216
217 /*
218 * Get the data from the pointed-to record.
219 */
220 int /* error */
221 xfs_alloc_get_rec(
222 struct xfs_btree_cur *cur, /* btree cursor */
223 xfs_agblock_t *bno, /* output: starting block of extent */
224 xfs_extlen_t *len, /* output: length of extent */
225 int *stat) /* output: success/failure */
226 {
227 struct xfs_mount *mp = cur->bc_mp;
228 xfs_agnumber_t agno = cur->bc_private.a.agno;
229 union xfs_btree_rec *rec;
230 int error;
231
232 error = xfs_btree_get_rec(cur, &rec, stat);
233 if (error || !(*stat))
234 return error;
235
236 *bno = be32_to_cpu(rec->alloc.ar_startblock);
237 *len = be32_to_cpu(rec->alloc.ar_blockcount);
238
239 if (*len == 0)
240 goto out_bad_rec;
241
242 /* check for valid extent range, including overflow */
243 if (!xfs_verify_agbno(mp, agno, *bno))
244 goto out_bad_rec;
245 if (*bno > *bno + *len)
246 goto out_bad_rec;
247 if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
248 goto out_bad_rec;
249
250 return 0;
251
252 out_bad_rec:
253 xfs_warn(mp,
254 "%s Freespace BTree record corruption in AG %d detected!",
255 cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
256 xfs_warn(mp,
257 "start block 0x%x block count 0x%x", *bno, *len);
258 return -EFSCORRUPTED;
259 }
260
261 /*
262 * Compute aligned version of the found extent.
263 * Takes alignment and min length into account.
264 */
265 STATIC bool
266 xfs_alloc_compute_aligned(
267 xfs_alloc_arg_t *args, /* allocation argument structure */
268 xfs_agblock_t foundbno, /* starting block in found extent */
269 xfs_extlen_t foundlen, /* length in found extent */
270 xfs_agblock_t *resbno, /* result block number */
271 xfs_extlen_t *reslen, /* result length */
272 unsigned *busy_gen)
273 {
274 xfs_agblock_t bno = foundbno;
275 xfs_extlen_t len = foundlen;
276 xfs_extlen_t diff;
277 bool busy;
278
279 /* Trim busy sections out of found extent */
280 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
281
282 /*
283 * If we have a largish extent that happens to start before min_agbno,
284 * see if we can shift it into range...
285 */
286 if (bno < args->min_agbno && bno + len > args->min_agbno) {
287 diff = args->min_agbno - bno;
288 if (len > diff) {
289 bno += diff;
290 len -= diff;
291 }
292 }
293
294 if (args->alignment > 1 && len >= args->minlen) {
295 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
296
297 diff = aligned_bno - bno;
298
299 *resbno = aligned_bno;
300 *reslen = diff >= len ? 0 : len - diff;
301 } else {
302 *resbno = bno;
303 *reslen = len;
304 }
305
306 return busy;
307 }
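/*
 * A small worked example of the alignment trim: for a busy-trimmed
 * extent [bno = 7, len = 10] and args->alignment = 4, aligned_bno is
 * roundup(7, 4) = 8, diff is 1, so the result is [8, 9]. If diff were
 * to consume the whole extent, *reslen would be clamped to 0 and the
 * caller would reject the extent against minlen.
 */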
308
309 /*
310 * Compute best start block and diff for "near" allocations.
311 * freelen >= wantlen already checked by caller.
312 */
313 STATIC xfs_extlen_t /* difference value (absolute) */
314 xfs_alloc_compute_diff(
315 xfs_agblock_t wantbno, /* target starting block */
316 xfs_extlen_t wantlen, /* target length */
317 xfs_extlen_t alignment, /* target alignment */
318 int datatype, /* are we allocating data? */
319 xfs_agblock_t freebno, /* freespace's starting block */
320 xfs_extlen_t freelen, /* freespace's length */
321 xfs_agblock_t *newbnop) /* result: best start block from free */
322 {
323 xfs_agblock_t freeend; /* end of freespace extent */
324 xfs_agblock_t newbno1; /* return block number */
325 xfs_agblock_t newbno2; /* other new block number */
326 xfs_extlen_t newlen1=0; /* length with newbno1 */
327 xfs_extlen_t newlen2=0; /* length with newbno2 */
328 xfs_agblock_t wantend; /* end of target extent */
329 bool userdata = datatype & XFS_ALLOC_USERDATA;
330
331 ASSERT(freelen >= wantlen);
332 freeend = freebno + freelen;
333 wantend = wantbno + wantlen;
334 /*
335 * We want to allocate from the start of a free extent if it is past
336 * the desired block or if we are allocating user data and the free
337 * extent is before desired block. The second case is there to allow
338 * for contiguous allocation from the remaining free space if the file
339 * grows in the short term.
340 */
341 if (freebno >= wantbno || (userdata && freeend < wantend)) {
342 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
343 newbno1 = NULLAGBLOCK;
344 } else if (freeend >= wantend && alignment > 1) {
345 newbno1 = roundup(wantbno, alignment);
346 newbno2 = newbno1 - alignment;
347 if (newbno1 >= freeend)
348 newbno1 = NULLAGBLOCK;
349 else
350 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
351 if (newbno2 < freebno)
352 newbno2 = NULLAGBLOCK;
353 else
354 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
355 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
356 if (newlen1 < newlen2 ||
357 (newlen1 == newlen2 &&
358 XFS_ABSDIFF(newbno1, wantbno) >
359 XFS_ABSDIFF(newbno2, wantbno)))
360 newbno1 = newbno2;
361 } else if (newbno2 != NULLAGBLOCK)
362 newbno1 = newbno2;
363 } else if (freeend >= wantend) {
364 newbno1 = wantbno;
365 } else if (alignment > 1) {
366 newbno1 = roundup(freeend - wantlen, alignment);
367 if (newbno1 > freeend - wantlen &&
368 newbno1 - alignment >= freebno)
369 newbno1 -= alignment;
370 else if (newbno1 >= freeend)
371 newbno1 = NULLAGBLOCK;
372 } else
373 newbno1 = freeend - wantlen;
374 *newbnop = newbno1;
375 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
376 }
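/*
 * Example of the common case: wantbno = 100, wantlen = 8,
 * alignment = 1, and a free extent starting at 90 with length 30
 * (freeend = 120). freebno < wantbno and freeend >= wantend = 108, so
 * we take newbno1 = wantbno = 100 and the returned diff is 0, i.e. a
 * perfect locality match inside the free extent.
 */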
377
378 /*
379 * Fix up the length, based on mod and prod.
380 * len should be k * prod + mod for some k.
381 * If len is too small it is returned unchanged.
382 * If len hits maxlen it is left alone.
383 */
384 STATIC void
385 xfs_alloc_fix_len(
386 xfs_alloc_arg_t *args) /* allocation argument structure */
387 {
388 xfs_extlen_t k;
389 xfs_extlen_t rlen;
390
391 ASSERT(args->mod < args->prod);
392 rlen = args->len;
393 ASSERT(rlen >= args->minlen);
394 ASSERT(rlen <= args->maxlen);
395 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
396 (args->mod == 0 && rlen < args->prod))
397 return;
398 k = rlen % args->prod;
399 if (k == args->mod)
400 return;
401 if (k > args->mod)
402 rlen = rlen - (k - args->mod);
403 else
404 rlen = rlen - args->prod + (args->mod - k);
405 /* casts to (int) catch length underflows */
406 if ((int)rlen < (int)args->minlen)
407 return;
408 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
409 ASSERT(rlen % args->prod == args->mod);
410 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
411 rlen + args->minleft);
412 args->len = rlen;
413 }
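/*
 * Worked example: prod = 4, mod = 1, len = 11, minlen = 4, maxlen = 16.
 * k = 11 % 4 = 3 > mod, so rlen = 11 - (3 - 1) = 9, and 9 % 4 == 1 as
 * required. Had k been smaller than mod (say len = 12, so k = 0), we
 * would instead shrink by a full prod: rlen = 12 - 4 + (1 - 0) = 9.
 */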
414
415 /*
416 * Update the two btrees, logically removing from freespace the extent
417 * starting at rbno, rlen blocks. The extent is contained within the
418 * actual (current) free extent fbno for flen blocks.
419 * Flags are passed in indicating whether the cursors are set to the
420 * relevant records.
421 */
422 STATIC int /* error code */
423 xfs_alloc_fixup_trees(
424 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
425 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
426 xfs_agblock_t fbno, /* starting block of free extent */
427 xfs_extlen_t flen, /* length of free extent */
428 xfs_agblock_t rbno, /* starting block of returned extent */
429 xfs_extlen_t rlen, /* length of returned extent */
430 int flags) /* flags, XFSA_FIXUP_... */
431 {
432 int error; /* error code */
433 int i; /* operation results */
434 xfs_agblock_t nfbno1; /* first new free startblock */
435 xfs_agblock_t nfbno2; /* second new free startblock */
436 xfs_extlen_t nflen1=0; /* first new free length */
437 xfs_extlen_t nflen2=0; /* second new free length */
438 struct xfs_mount *mp;
439
440 mp = cnt_cur->bc_mp;
441
442 /*
443 * Look up the record in the by-size tree if necessary.
444 */
445 if (flags & XFSA_FIXUP_CNT_OK) {
446 #ifdef DEBUG
447 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
448 return error;
449 if (XFS_IS_CORRUPT(mp,
450 i != 1 ||
451 nfbno1 != fbno ||
452 nflen1 != flen))
453 return -EFSCORRUPTED;
454 #endif
455 } else {
456 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
457 return error;
458 if (XFS_IS_CORRUPT(mp, i != 1))
459 return -EFSCORRUPTED;
460 }
461 /*
462 * Look up the record in the by-block tree if necessary.
463 */
464 if (flags & XFSA_FIXUP_BNO_OK) {
465 #ifdef DEBUG
466 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
467 return error;
468 if (XFS_IS_CORRUPT(mp,
469 i != 1 ||
470 nfbno1 != fbno ||
471 nflen1 != flen))
472 return -EFSCORRUPTED;
473 #endif
474 } else {
475 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
476 return error;
477 if (XFS_IS_CORRUPT(mp, i != 1))
478 return -EFSCORRUPTED;
479 }
480
481 #ifdef DEBUG
482 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
483 struct xfs_btree_block *bnoblock;
484 struct xfs_btree_block *cntblock;
485
486 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
487 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
488
489 if (XFS_IS_CORRUPT(mp,
490 bnoblock->bb_numrecs !=
491 cntblock->bb_numrecs))
492 return -EFSCORRUPTED;
493 }
494 #endif
495
496 /*
497 * Deal with all four cases: the allocated record is contained
498 * within the freespace record, so we can have new freespace
499 * at either (or both) end, or no freespace remaining.
500 */
501 if (rbno == fbno && rlen == flen)
502 nfbno1 = nfbno2 = NULLAGBLOCK;
503 else if (rbno == fbno) {
504 nfbno1 = rbno + rlen;
505 nflen1 = flen - rlen;
506 nfbno2 = NULLAGBLOCK;
507 } else if (rbno + rlen == fbno + flen) {
508 nfbno1 = fbno;
509 nflen1 = flen - rlen;
510 nfbno2 = NULLAGBLOCK;
511 } else {
512 nfbno1 = fbno;
513 nflen1 = rbno - fbno;
514 nfbno2 = rbno + rlen;
515 nflen2 = (fbno + flen) - nfbno2;
516 }
517 /*
518 * Delete the entry from the by-size btree.
519 */
520 if ((error = xfs_btree_delete(cnt_cur, &i)))
521 return error;
522 if (XFS_IS_CORRUPT(mp, i != 1))
523 return -EFSCORRUPTED;
524 /*
525 * Add new by-size btree entry(s).
526 */
527 if (nfbno1 != NULLAGBLOCK) {
528 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
529 return error;
530 if (XFS_IS_CORRUPT(mp, i != 0))
531 return -EFSCORRUPTED;
532 if ((error = xfs_btree_insert(cnt_cur, &i)))
533 return error;
534 if (XFS_IS_CORRUPT(mp, i != 1))
535 return -EFSCORRUPTED;
536 }
537 if (nfbno2 != NULLAGBLOCK) {
538 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
539 return error;
540 if (XFS_IS_CORRUPT(mp, i != 0))
541 return -EFSCORRUPTED;
542 if ((error = xfs_btree_insert(cnt_cur, &i)))
543 return error;
544 if (XFS_IS_CORRUPT(mp, i != 1))
545 return -EFSCORRUPTED;
546 }
547 /*
548 * Fix up the by-block btree entry(s).
549 */
550 if (nfbno1 == NULLAGBLOCK) {
551 /*
552 * No remaining freespace, just delete the by-block tree entry.
553 */
554 if ((error = xfs_btree_delete(bno_cur, &i)))
555 return error;
556 if (XFS_IS_CORRUPT(mp, i != 1))
557 return -EFSCORRUPTED;
558 } else {
559 /*
560 * Update the by-block entry to start later|be shorter.
561 */
562 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
563 return error;
564 }
565 if (nfbno2 != NULLAGBLOCK) {
566 /*
567 * 2 resulting free entries, need to add one.
568 */
569 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
570 return error;
571 if (XFS_IS_CORRUPT(mp, i != 0))
572 return -EFSCORRUPTED;
573 if ((error = xfs_btree_insert(bno_cur, &i)))
574 return error;
575 if (XFS_IS_CORRUPT(mp, i != 1))
576 return -EFSCORRUPTED;
577 }
578 return 0;
579 }
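/*
 * Example of the "middle cut" case above: freespace [fbno = 10,
 * flen = 10] and an allocation [rbno = 12, rlen = 4] split the record
 * into two new free extents, [10, 2] and [16, 4]. The by-size entries
 * are deleted and reinserted (their key changes), while the by-block
 * entry for the first extent is updated in place and a second one is
 * inserted.
 */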
580
581 static xfs_failaddr_t
582 xfs_agfl_verify(
583 struct xfs_buf *bp)
584 {
585 struct xfs_mount *mp = bp->b_mount;
586 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
587 int i;
588
589 /*
590 * There is no verification of non-crc AGFLs because mkfs does not
591 * initialise the AGFL to zero or NULL. Hence the only valid part of the
592 * AGFL is what the AGF says is active. We can't get to the AGF, so we
593 * can't verify just those entries are valid.
594 */
595 if (!xfs_sb_version_hascrc(&mp->m_sb))
596 return NULL;
597
598 if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
599 return __this_address;
600 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
601 return __this_address;
602 /*
603 * during growfs operations, the perag is not fully initialised,
604 * so we can't use it for any useful checking. growfs ensures we can't
605 * use it by using uncached buffers that don't have the perag attached
606 * so we can detect and avoid this problem.
607 */
608 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
609 return __this_address;
610
611 for (i = 0; i < xfs_agfl_size(mp); i++) {
612 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
613 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
614 return __this_address;
615 }
616
617 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
618 return __this_address;
619 return NULL;
620 }
621
622 static void
623 xfs_agfl_read_verify(
624 struct xfs_buf *bp)
625 {
626 struct xfs_mount *mp = bp->b_mount;
627 xfs_failaddr_t fa;
628
629 /*
630 * There is no verification of non-crc AGFLs because mkfs does not
631 * initialise the AGFL to zero or NULL. Hence the only valid part of the
632 * AGFL is what the AGF says is active. We can't get to the AGF, so we
633 * can't verify just those entries are valid.
634 */
635 if (!xfs_sb_version_hascrc(&mp->m_sb))
636 return;
637
638 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
639 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
640 else {
641 fa = xfs_agfl_verify(bp);
642 if (fa)
643 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
644 }
645 }
646
647 static void
648 xfs_agfl_write_verify(
649 struct xfs_buf *bp)
650 {
651 struct xfs_mount *mp = bp->b_mount;
652 struct xfs_buf_log_item *bip = bp->b_log_item;
653 xfs_failaddr_t fa;
654
655 /* no verification of non-crc AGFLs */
656 if (!xfs_sb_version_hascrc(&mp->m_sb))
657 return;
658
659 fa = xfs_agfl_verify(bp);
660 if (fa) {
661 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
662 return;
663 }
664
665 if (bip)
666 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
667
668 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
669 }
670
671 const struct xfs_buf_ops xfs_agfl_buf_ops = {
672 .name = "xfs_agfl",
673 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
674 .verify_read = xfs_agfl_read_verify,
675 .verify_write = xfs_agfl_write_verify,
676 .verify_struct = xfs_agfl_verify,
677 };
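/*
 * Note: the two .magic slots are indexed by whether the filesystem has
 * CRCs enabled; the AGFL only carries a magic number in its v5 form,
 * so both slots hold XFS_AGFL_MAGIC, and the verifier itself bails out
 * early on v4 filesystems anyway.
 */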
678
679 /*
680 * Read in the allocation group free block array.
681 */
682 int /* error */
683 xfs_alloc_read_agfl(
684 xfs_mount_t *mp, /* mount point structure */
685 xfs_trans_t *tp, /* transaction pointer */
686 xfs_agnumber_t agno, /* allocation group number */
687 xfs_buf_t **bpp) /* buffer for the ag free block array */
688 {
689 xfs_buf_t *bp; /* return value */
690 int error;
691
692 ASSERT(agno != NULLAGNUMBER);
693 error = xfs_trans_read_buf(
694 mp, tp, mp->m_ddev_targp,
695 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
696 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
697 if (error)
698 return error;
699 xfs_buf_set_ref(bp, XFS_AGFL_REF);
700 *bpp = bp;
701 return 0;
702 }
703
704 STATIC int
705 xfs_alloc_update_counters(
706 struct xfs_trans *tp,
707 struct xfs_perag *pag,
708 struct xfs_buf *agbp,
709 long len)
710 {
711 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
712
713 pag->pagf_freeblks += len;
714 be32_add_cpu(&agf->agf_freeblks, len);
715
716 xfs_trans_agblocks_delta(tp, len);
717 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
718 be32_to_cpu(agf->agf_length))) {
719 xfs_buf_corruption_error(agbp);
720 return -EFSCORRUPTED;
721 }
722
723 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
724 return 0;
725 }
726
727 /*
728 * Block allocation algorithm and data structures.
729 */
730 struct xfs_alloc_cur {
731 struct xfs_btree_cur *cnt; /* btree cursors */
732 struct xfs_btree_cur *bnolt;
733 struct xfs_btree_cur *bnogt;
734 xfs_extlen_t cur_len;/* current search length */
735 xfs_agblock_t rec_bno;/* extent startblock */
736 xfs_extlen_t rec_len;/* extent length */
737 xfs_agblock_t bno; /* alloc bno */
738 xfs_extlen_t len; /* alloc len */
739 xfs_extlen_t diff; /* diff from search bno */
740 unsigned int busy_gen;/* busy state */
741 bool busy;
742 };
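/*
 * The tracking fields above hold the best allocation candidate seen so
 * far: rec_bno/rec_len describe the btree record it came from, bno/len
 * the trimmed and aligned extent we would actually allocate from it,
 * and diff its distance from the locality hint. cur_len is the extent
 * size key that drives the cntbt component of the search.
 */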
743
744 /*
745 * Set up cursors, etc. in the extent allocation cursor. This function can be
746 * called multiple times to reset an initialized structure without having to
747 * reallocate cursors.
748 */
749 static int
750 xfs_alloc_cur_setup(
751 struct xfs_alloc_arg *args,
752 struct xfs_alloc_cur *acur)
753 {
754 int error;
755 int i;
756
757 ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
758
759 acur->cur_len = args->maxlen;
760 acur->rec_bno = 0;
761 acur->rec_len = 0;
762 acur->bno = 0;
763 acur->len = 0;
764 acur->diff = -1;
765 acur->busy = false;
766 acur->busy_gen = 0;
767
768 /*
769 * Perform an initial cntbt lookup to check for availability of maxlen
770 * extents. If this fails, we'll return -ENOSPC to signal the caller to
771 * attempt a small allocation.
772 */
773 if (!acur->cnt)
774 acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
775 args->agbp, args->agno, XFS_BTNUM_CNT);
776 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
777 if (error)
778 return error;
779
780 /*
781 * Allocate the bnobt left and right search cursors.
782 */
783 if (!acur->bnolt)
784 acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
785 args->agbp, args->agno, XFS_BTNUM_BNO);
786 if (!acur->bnogt)
787 acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
788 args->agbp, args->agno, XFS_BTNUM_BNO);
789 return i == 1 ? 0 : -ENOSPC;
790 }
791
792 static void
793 xfs_alloc_cur_close(
794 struct xfs_alloc_cur *acur,
795 bool error)
796 {
797 int cur_error = XFS_BTREE_NOERROR;
798
799 if (error)
800 cur_error = XFS_BTREE_ERROR;
801
802 if (acur->cnt)
803 xfs_btree_del_cursor(acur->cnt, cur_error);
804 if (acur->bnolt)
805 xfs_btree_del_cursor(acur->bnolt, cur_error);
806 if (acur->bnogt)
807 xfs_btree_del_cursor(acur->bnogt, cur_error);
808 acur->cnt = acur->bnolt = acur->bnogt = NULL;
809 }
810
811 /*
812 * Check an extent for allocation and track the best available candidate in the
813 * allocation structure. The cursor is deactivated if it has entered an out of
814 * range state based on allocation arguments. Optionally return the extent
815 * geometry and allocation status if requested by the caller.
816 */
817 static int
818 xfs_alloc_cur_check(
819 struct xfs_alloc_arg *args,
820 struct xfs_alloc_cur *acur,
821 struct xfs_btree_cur *cur,
822 int *new)
823 {
824 int error, i;
825 xfs_agblock_t bno, bnoa, bnew;
826 xfs_extlen_t len, lena, diff = -1;
827 bool busy;
828 unsigned busy_gen = 0;
829 bool deactivate = false;
830 bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
831
832 *new = 0;
833
834 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
835 if (error)
836 return error;
837 if (XFS_IS_CORRUPT(args->mp, i != 1))
838 return -EFSCORRUPTED;
839
840 /*
841 * Check minlen and deactivate a cntbt cursor if out of acceptable size
842 * range (i.e., walking backwards looking for a minlen extent).
843 */
844 if (len < args->minlen) {
845 deactivate = !isbnobt;
846 goto out;
847 }
848
849 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
850 &busy_gen);
851 acur->busy |= busy;
852 if (busy)
853 acur->busy_gen = busy_gen;
854 /* deactivate a bnobt cursor outside of locality range */
855 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
856 deactivate = isbnobt;
857 goto out;
858 }
859 if (lena < args->minlen)
860 goto out;
861
862 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
863 xfs_alloc_fix_len(args);
864 ASSERT(args->len >= args->minlen);
865 if (args->len < acur->len)
866 goto out;
867
868 /*
869 * We have an aligned record that satisfies minlen and beats or matches
870 * the candidate extent size. Compare locality for near allocation mode.
871 */
872 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
873 diff = xfs_alloc_compute_diff(args->agbno, args->len,
874 args->alignment, args->datatype,
875 bnoa, lena, &bnew);
876 if (bnew == NULLAGBLOCK)
877 goto out;
878
879 /*
880 * Deactivate a bnobt cursor with worse locality than the current best.
881 */
882 if (diff > acur->diff) {
883 deactivate = isbnobt;
884 goto out;
885 }
886
887 ASSERT(args->len > acur->len ||
888 (args->len == acur->len && diff <= acur->diff));
889 acur->rec_bno = bno;
890 acur->rec_len = len;
891 acur->bno = bnew;
892 acur->len = args->len;
893 acur->diff = diff;
894 *new = 1;
895
896 /*
897 * We're done if we found a perfect allocation. This only deactivates
898 * the current cursor, but this is just an optimization to terminate a
899 * cntbt search that otherwise runs to the edge of the tree.
900 */
901 if (acur->diff == 0 && acur->len == args->maxlen)
902 deactivate = true;
903 out:
904 if (deactivate)
905 cur->bc_private.a.priv.abt.active = false;
906 trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
907 *new);
908 return 0;
909 }
910
911 /*
912 * Complete an allocation of a candidate extent. Remove the extent from both
913 * trees and update the args structure.
914 */
915 STATIC int
916 xfs_alloc_cur_finish(
917 struct xfs_alloc_arg *args,
918 struct xfs_alloc_cur *acur)
919 {
920 int error;
921
922 ASSERT(acur->cnt && acur->bnolt);
923 ASSERT(acur->bno >= acur->rec_bno);
924 ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
925 ASSERT(acur->rec_bno + acur->rec_len <=
926 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
927
928 error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
929 acur->rec_len, acur->bno, acur->len, 0);
930 if (error)
931 return error;
932
933 args->agbno = acur->bno;
934 args->len = acur->len;
935 args->wasfromfl = 0;
936
937 trace_xfs_alloc_cur(args);
938 return 0;
939 }
940
941 /*
942 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
943 * bno optimized lookup to search for extents with ideal size and locality.
944 */
945 STATIC int
946 xfs_alloc_cntbt_iter(
947 struct xfs_alloc_arg *args,
948 struct xfs_alloc_cur *acur)
949 {
950 struct xfs_btree_cur *cur = acur->cnt;
951 xfs_agblock_t bno;
952 xfs_extlen_t len, cur_len;
953 int error;
954 int i;
955
956 if (!xfs_alloc_cur_active(cur))
957 return 0;
958
959 /* locality optimized lookup */
960 cur_len = acur->cur_len;
961 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
962 if (error)
963 return error;
964 if (i == 0)
965 return 0;
966 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
967 if (error)
968 return error;
969
970 /* check the current record and update search length from it */
971 error = xfs_alloc_cur_check(args, acur, cur, &i);
972 if (error)
973 return error;
974 ASSERT(len >= acur->cur_len);
975 acur->cur_len = len;
976
977 /*
978 * We looked up the first record >= [agbno, len] above. The agbno is a
979 * secondary key and so the current record may lie just before or after
980 * agbno. If it is past agbno, check the previous record too so long as
981 * the length matches as it may be closer. Don't check a smaller record
982 * because that could deactivate our cursor.
983 */
984 if (bno > args->agbno) {
985 error = xfs_btree_decrement(cur, 0, &i);
986 if (!error && i) {
987 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
988 if (!error && i && len == acur->cur_len)
989 error = xfs_alloc_cur_check(args, acur, cur,
990 &i);
991 }
992 if (error)
993 return error;
994 }
995
996 /*
997 * Increment the search key by one while we have yet to find an
998 * allocation candidate, or if the record we found was already larger
999 * than the doubled key; otherwise double the search key to speed up
1000 * the search. Efficiency is more important here than absolute best locality.
1001 */
1002 cur_len <<= 1;
1003 if (!acur->len || acur->cur_len >= cur_len)
1004 acur->cur_len++;
1005 else
1006 acur->cur_len = cur_len;
1007
1008 return error;
1009 }
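/*
 * Search key progression example: suppose acur->cur_len was 8 and the
 * lookup found a record of length 9 (so acur->cur_len is now 9, and the
 * local doubled key is 16). If we still have no candidate, or the
 * record found was already >= 16, the key steps to 10 and we keep
 * scanning densely; otherwise it jumps straight to 16 to converge
 * faster.
 */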
1010
1011 /*
1012 * Deal with the case where only small freespaces remain. Either return the
1013 * contents of the last freespace record, or allocate space from the freelist if
1014 * there is nothing in the tree.
1015 */
1016 STATIC int /* error */
1017 xfs_alloc_ag_vextent_small(
1018 struct xfs_alloc_arg *args, /* allocation argument structure */
1019 struct xfs_btree_cur *ccur, /* optional by-size cursor */
1020 xfs_agblock_t *fbnop, /* result block number */
1021 xfs_extlen_t *flenp, /* result length */
1022 int *stat) /* status: 0-freelist, 1-normal/none */
1023 {
1024 int error = 0;
1025 xfs_agblock_t fbno = NULLAGBLOCK;
1026 xfs_extlen_t flen = 0;
1027 int i = 0;
1028
1029 /*
1030 * If a cntbt cursor is provided, try to allocate the largest record in
1031 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1032 * allocation. Make sure to respect minleft even when pulling from the
1033 * freelist.
1034 */
1035 if (ccur)
1036 error = xfs_btree_decrement(ccur, 0, &i);
1037 if (error)
1038 goto error;
1039 if (i) {
1040 error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1041 if (error)
1042 goto error;
1043 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1044 error = -EFSCORRUPTED;
1045 goto error;
1046 }
1047 goto out;
1048 }
1049
1050 if (args->minlen != 1 || args->alignment != 1 ||
1051 args->resv == XFS_AG_RESV_AGFL ||
1052 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) <=
1053 args->minleft))
1054 goto out;
1055
1056 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1057 if (error)
1058 goto error;
1059 if (fbno == NULLAGBLOCK)
1060 goto out;
1061
1062 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
1063 (args->datatype & XFS_ALLOC_NOBUSY));
1064
1065 if (args->datatype & XFS_ALLOC_USERDATA) {
1066 struct xfs_buf *bp;
1067
1068 bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
1069 if (XFS_IS_CORRUPT(args->mp, !bp)) {
1070 error = -EFSCORRUPTED;
1071 goto error;
1072 }
1073 xfs_trans_binval(args->tp, bp);
1074 }
1075 *fbnop = args->agbno = fbno;
1076 *flenp = args->len = 1;
1077 if (XFS_IS_CORRUPT(args->mp,
1078 fbno >= be32_to_cpu(
1079 XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
1080 error = -EFSCORRUPTED;
1081 goto error;
1082 }
1083 args->wasfromfl = 1;
1084 trace_xfs_alloc_small_freelist(args);
1085
1086 /*
1087 * If we're feeding an AGFL block to something that doesn't live in the
1088 * free space, we need to clear out the OWN_AG rmap.
1089 */
1090 error = xfs_rmap_free(args->tp, args->agbp, args->agno, fbno, 1,
1091 &XFS_RMAP_OINFO_AG);
1092 if (error)
1093 goto error;
1094
1095 *stat = 0;
1096 return 0;
1097
1098 out:
1099 /*
1100 * Can't do the allocation, give up.
1101 */
1102 if (flen < args->minlen) {
1103 args->agbno = NULLAGBLOCK;
1104 trace_xfs_alloc_small_notenough(args);
1105 flen = 0;
1106 }
1107 *fbnop = fbno;
1108 *flenp = flen;
1109 *stat = 1;
1110 trace_xfs_alloc_small_done(args);
1111 return 0;
1112
1113 error:
1114 trace_xfs_alloc_small_error(args);
1115 return error;
1116 }
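/*
 * On return, *stat distinguishes the two success modes described above:
 * 0 means a single block was handed out from the AGFL (args->agbno and
 * args->len are final), while 1 means the caller got the last freespace
 * record in *fbnop/*flenp (or NULLAGBLOCK if nothing was usable) and
 * must finish the allocation itself.
 */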
1117
1118 /*
1119 * Allocate a variable extent in the allocation group agno.
1120 * Type and bno are used to determine where in the allocation group the
1121 * extent will start.
1122 * Extent's length (returned in *len) will be between minlen and maxlen,
1123 * and of the form k * prod + mod unless there's nothing that large.
1124 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1125 */
1126 STATIC int /* error */
1127 xfs_alloc_ag_vextent(
1128 xfs_alloc_arg_t *args) /* argument structure for allocation */
1129 {
1130 int error=0;
1131
1132 ASSERT(args->minlen > 0);
1133 ASSERT(args->maxlen > 0);
1134 ASSERT(args->minlen <= args->maxlen);
1135 ASSERT(args->mod < args->prod);
1136 ASSERT(args->alignment > 0);
1137
1138 /*
1139 * Branch to correct routine based on the type.
1140 */
1141 args->wasfromfl = 0;
1142 switch (args->type) {
1143 case XFS_ALLOCTYPE_THIS_AG:
1144 error = xfs_alloc_ag_vextent_size(args);
1145 break;
1146 case XFS_ALLOCTYPE_NEAR_BNO:
1147 error = xfs_alloc_ag_vextent_near(args);
1148 break;
1149 case XFS_ALLOCTYPE_THIS_BNO:
1150 error = xfs_alloc_ag_vextent_exact(args);
1151 break;
1152 default:
1153 ASSERT(0);
1154 /* NOTREACHED */
1155 }
1156
1157 if (error || args->agbno == NULLAGBLOCK)
1158 return error;
1159
1160 ASSERT(args->len >= args->minlen);
1161 ASSERT(args->len <= args->maxlen);
1162 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
1163 ASSERT(args->agbno % args->alignment == 0);
1164
1165 /* if not file data, insert new block into the reverse map btree */
1166 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
1167 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
1168 args->agbno, args->len, &args->oinfo);
1169 if (error)
1170 return error;
1171 }
1172
1173 if (!args->wasfromfl) {
1174 error = xfs_alloc_update_counters(args->tp, args->pag,
1175 args->agbp,
1176 -((long)(args->len)));
1177 if (error)
1178 return error;
1179
1180 ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
1181 args->agbno, args->len));
1182 }
1183
1184 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
1185
1186 XFS_STATS_INC(args->mp, xs_allocx);
1187 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
1188 return error;
1189 }
1190
1191 /*
1192 * Allocate a variable extent at exactly agno/bno.
1193 * Extent's length (returned in *len) will be between minlen and maxlen,
1194 * and of the form k * prod + mod unless there's nothing that large.
1195 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1196 */
1197 STATIC int /* error */
1198 xfs_alloc_ag_vextent_exact(
1199 xfs_alloc_arg_t *args) /* allocation argument structure */
1200 {
1201 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
1202 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
1203 int error;
1204 xfs_agblock_t fbno; /* start block of found extent */
1205 xfs_extlen_t flen; /* length of found extent */
1206 xfs_agblock_t tbno; /* start block of busy extent */
1207 xfs_extlen_t tlen; /* length of busy extent */
1208 xfs_agblock_t tend; /* end block of busy extent */
1209 int i; /* success/failure of operation */
1210 unsigned busy_gen;
1211
1212 ASSERT(args->alignment == 1);
1213
1214 /*
1215 * Allocate/initialize a cursor for the by-number freespace btree.
1216 */
1217 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1218 args->agno, XFS_BTNUM_BNO);
1219
1220 /*
1221 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1222 * Look for the closest free block <= bno, it must contain bno
1223 * if any free block does.
1224 */
1225 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1226 if (error)
1227 goto error0;
1228 if (!i)
1229 goto not_found;
1230
1231 /*
1232 * Grab the freespace record.
1233 */
1234 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1235 if (error)
1236 goto error0;
1237 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1238 error = -EFSCORRUPTED;
1239 goto error0;
1240 }
1241 ASSERT(fbno <= args->agbno);
1242
1243 /*
1244 * Check for overlapping busy extents.
1245 */
1246 tbno = fbno;
1247 tlen = flen;
1248 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1249
1250 /*
1251 * Give up if the start of the extent is busy, or the freespace isn't
1252 * long enough for the minimum request.
1253 */
1254 if (tbno > args->agbno)
1255 goto not_found;
1256 if (tlen < args->minlen)
1257 goto not_found;
1258 tend = tbno + tlen;
1259 if (tend < args->agbno + args->minlen)
1260 goto not_found;
1261
1262 /*
1263 * End of extent will be smaller of the freespace end and the
1264 * maximal requested end.
1265 *
1266 * Fix the length according to mod and prod if given.
1267 */
1268 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1269 - args->agbno;
1270 xfs_alloc_fix_len(args);
1271 ASSERT(args->agbno + args->len <= tend);
1272
1273 /*
1274 * We are allocating agbno for args->len
1275 * Allocate/initialize a cursor for the by-size btree.
1276 */
1277 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1278 args->agno, XFS_BTNUM_CNT);
1279 ASSERT(args->agbno + args->len <=
1280 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1281 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1282 args->len, XFSA_FIXUP_BNO_OK);
1283 if (error) {
1284 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1285 goto error0;
1286 }
1287
1288 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1289 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1290
1291 args->wasfromfl = 0;
1292 trace_xfs_alloc_exact_done(args);
1293 return 0;
1294
1295 not_found:
1296 /* Didn't find it, return null. */
1297 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1298 args->agbno = NULLAGBLOCK;
1299 trace_xfs_alloc_exact_notfound(args);
1300 return 0;
1301
1302 error0:
1303 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1304 trace_xfs_alloc_exact_error(args);
1305 return error;
1306 }
1307
1308 /*
1309 * Search a given number of btree records in a given direction. Check each
1310 * record against the good extent we've already found.
1311 */
1312 STATIC int
1313 xfs_alloc_walk_iter(
1314 struct xfs_alloc_arg *args,
1315 struct xfs_alloc_cur *acur,
1316 struct xfs_btree_cur *cur,
1317 bool increment,
1318 bool find_one, /* quit on first candidate */
1319 int count, /* rec count (-1 for infinite) */
1320 int *stat)
1321 {
1322 int error;
1323 int i;
1324
1325 *stat = 0;
1326
1327 /*
1328 * Search so long as the cursor is active or we find a better extent.
1329 * The cursor is deactivated if it extends beyond the range of the
1330 * current allocation candidate.
1331 */
1332 while (xfs_alloc_cur_active(cur) && count) {
1333 error = xfs_alloc_cur_check(args, acur, cur, &i);
1334 if (error)
1335 return error;
1336 if (i == 1) {
1337 *stat = 1;
1338 if (find_one)
1339 break;
1340 }
1341 if (!xfs_alloc_cur_active(cur))
1342 break;
1343
1344 if (increment)
1345 error = xfs_btree_increment(cur, 0, &i);
1346 else
1347 error = xfs_btree_decrement(cur, 0, &i);
1348 if (error)
1349 return error;
1350 if (i == 0)
1351 cur->bc_private.a.priv.abt.active = false;
1352
1353 if (count > 0)
1354 count--;
1355 }
1356
1357 return 0;
1358 }
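/*
 * Callers tune the walk modes above: the bnobt left/right scans in the
 * locality search use find_one = true with count = 1 to step the two
 * cursors in lockstep, while the unbounded passes (the last-block scan
 * and the final opposite-direction search) run with count = -1.
 */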
1359
1360 /*
1361 * Search the by-bno and by-size btrees in parallel in search of an extent with
1362 * ideal locality based on the NEAR mode ->agbno locality hint.
1363 */
1364 STATIC int
1365 xfs_alloc_ag_vextent_locality(
1366 struct xfs_alloc_arg *args,
1367 struct xfs_alloc_cur *acur,
1368 int *stat)
1369 {
1370 struct xfs_btree_cur *fbcur = NULL;
1371 int error;
1372 int i;
1373 bool fbinc;
1374
1375 ASSERT(acur->len == 0);
1376 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
1377
1378 *stat = 0;
1379
1380 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1381 if (error)
1382 return error;
1383 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1384 if (error)
1385 return error;
1386 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1387 if (error)
1388 return error;
1389
1390 /*
1391 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1392 * right and lookup the closest extent to the locality hint for each
1393 * extent size key in the cntbt. The entire search terminates
1394 * immediately on a bnobt hit because that means we've found best case
1395 * locality. Otherwise the search continues until the cntbt cursor runs
1396 * off the end of the tree. If no allocation candidate is found at this
1397 * point, give up on locality, walk backwards from the end of the cntbt
1398 * and take the first available extent.
1399 *
1400 * The parallel tree searches balance each other out to provide fairly
1401 * consistent performance for various situations. The bnobt search can
1402 * have pathological behavior in the worst case scenario of larger
1403 * allocation requests and fragmented free space. On the other hand, the
1404 * bnobt is able to satisfy most smaller allocation requests much more
1405 * quickly than the cntbt. The cntbt search can sift through fragmented
1406 * free space and sets of free extents for larger allocation requests
1407 * more quickly than the bnobt. Since the locality hint is just a hint
1408 * and we don't want to scan the entire bnobt for perfect locality, the
1409 * cntbt search essentially bounds the bnobt search such that we can
1410 * find good enough locality at reasonable performance in most cases.
1411 */
1412 while (xfs_alloc_cur_active(acur->bnolt) ||
1413 xfs_alloc_cur_active(acur->bnogt) ||
1414 xfs_alloc_cur_active(acur->cnt)) {
1415
1416 trace_xfs_alloc_cur_lookup(args);
1417
1418 /*
1419 * Search the bnobt left and right. In the case of a hit, finish
1420 * the search in the opposite direction and we're done.
1421 */
1422 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1423 true, 1, &i);
1424 if (error)
1425 return error;
1426 if (i == 1) {
1427 trace_xfs_alloc_cur_left(args);
1428 fbcur = acur->bnogt;
1429 fbinc = true;
1430 break;
1431 }
1432 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1433 1, &i);
1434 if (error)
1435 return error;
1436 if (i == 1) {
1437 trace_xfs_alloc_cur_right(args);
1438 fbcur = acur->bnolt;
1439 fbinc = false;
1440 break;
1441 }
1442
1443 /*
1444 * Check the extent with best locality based on the current
1445 * extent size search key and keep track of the best candidate.
1446 */
1447 error = xfs_alloc_cntbt_iter(args, acur);
1448 if (error)
1449 return error;
1450 if (!xfs_alloc_cur_active(acur->cnt)) {
1451 trace_xfs_alloc_cur_lookup_done(args);
1452 break;
1453 }
1454 }
1455
1456 /*
1457 * If we failed to find anything due to busy extents, return empty
1458 * handed so the caller can flush and retry. If no busy extents were
1459 * found, walk backwards from the end of the cntbt as a last resort.
1460 */
1461 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1462 error = xfs_btree_decrement(acur->cnt, 0, &i);
1463 if (error)
1464 return error;
1465 if (i) {
1466 acur->cnt->bc_private.a.priv.abt.active = true;
1467 fbcur = acur->cnt;
1468 fbinc = false;
1469 }
1470 }
1471
1472 /*
1473 * Search in the opposite direction for a better entry in the case of
1474 * a bnobt hit or walk backwards from the end of the cntbt.
1475 */
1476 if (fbcur) {
1477 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1478 &i);
1479 if (error)
1480 return error;
1481 }
1482
1483 if (acur->len)
1484 *stat = 1;
1485
1486 return 0;
1487 }
1488
1489 /* Check the last block of the cnt btree for allocations. */
1490 static int
1491 xfs_alloc_ag_vextent_lastblock(
1492 struct xfs_alloc_arg *args,
1493 struct xfs_alloc_cur *acur,
1494 xfs_agblock_t *bno,
1495 xfs_extlen_t *len,
1496 bool *allocated)
1497 {
1498 int error;
1499 int i;
1500
1501 #ifdef DEBUG
1502 /* Randomly don't execute the first algorithm. */
1503 if (prandom_u32() & 1)
1504 return 0;
1505 #endif
1506
1507 /*
1508 * Start from the entry that lookup found, sequence through all larger
1509 * free blocks. If we're actually pointing at a record smaller than
1510 * maxlen, go to the start of this block, and skip all those smaller
1511 * than minlen.
1512 */
1513 if (len || args->alignment > 1) {
1514 acur->cnt->bc_ptrs[0] = 1;
1515 do {
1516 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1517 if (error)
1518 return error;
1519 if (XFS_IS_CORRUPT(args->mp, i != 1))
1520 return -EFSCORRUPTED;
1521 if (*len >= args->minlen)
1522 break;
1523 error = xfs_btree_increment(acur->cnt, 0, &i);
1524 if (error)
1525 return error;
1526 } while (i);
1527 ASSERT(*len >= args->minlen);
1528 if (!i)
1529 return 0;
1530 }
1531
1532 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1533 if (error)
1534 return error;
1535
1536 /*
1537 * It didn't work. We COULD be in a case where there's a good record
1538 * somewhere, so try again.
1539 */
1540 if (acur->len == 0)
1541 return 0;
1542
1543 trace_xfs_alloc_near_first(args);
1544 *allocated = true;
1545 return 0;
1546 }
1547
1548 /*
1549 * Allocate a variable extent near bno in the allocation group agno.
1550 * Extent's length (returned in len) will be between minlen and maxlen,
1551 * and of the form k * prod + mod unless there's nothing that large.
1552 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1553 */
1554 STATIC int
1555 xfs_alloc_ag_vextent_near(
1556 struct xfs_alloc_arg *args)
1557 {
1558 struct xfs_alloc_cur acur = {};
1559 int error; /* error code */
1560 int i; /* result code, temporary */
1561 xfs_agblock_t bno;
1562 xfs_extlen_t len;
1563
1564 /* handle uninitialized agbno range so caller doesn't have to */
1565 if (!args->min_agbno && !args->max_agbno)
1566 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1567 ASSERT(args->min_agbno <= args->max_agbno);
1568
1569 /* clamp agbno to the range if it's outside */
1570 if (args->agbno < args->min_agbno)
1571 args->agbno = args->min_agbno;
1572 if (args->agbno > args->max_agbno)
1573 args->agbno = args->max_agbno;
1574
1575 restart:
1576 len = 0;
1577
1578 /*
1579 * Set up cursors and see if there are any free extents as big as
1580 * maxlen. If not, pick the last entry in the tree unless the tree is
1581 * empty.
1582 */
1583 error = xfs_alloc_cur_setup(args, &acur);
1584 if (error == -ENOSPC) {
1585 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1586 &len, &i);
1587 if (error)
1588 goto out;
1589 if (i == 0 || len == 0) {
1590 trace_xfs_alloc_near_noentry(args);
1591 goto out;
1592 }
1593 ASSERT(i == 1);
1594 } else if (error) {
1595 goto out;
1596 }
1597
1598 /*
1599 * First algorithm.
1600 * If the requested extent is large wrt the freespaces available
1601 * in this a.g., then the cursor will be pointing to a btree entry
1602 * near the right edge of the tree. If it's in the last btree leaf
1603 * block, then we just examine all the entries in that block
1604 * that are big enough, and pick the best one.
1605 */
1606 if (xfs_btree_islastblock(acur.cnt, 0)) {
1607 bool allocated = false;
1608
1609 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1610 &allocated);
1611 if (error)
1612 goto out;
1613 if (allocated)
1614 goto alloc_finish;
1615 }
1616
1617 /*
1618 * Second algorithm. Combined cntbt and bnobt search to find ideal
1619 * locality.
1620 */
1621 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1622 if (error)
1623 goto out;
1624
1625 /*
1626 * If we couldn't get anything, give up.
1627 */
1628 if (!acur.len) {
1629 if (acur.busy) {
1630 trace_xfs_alloc_near_busy(args);
1631 xfs_extent_busy_flush(args->mp, args->pag,
1632 acur.busy_gen);
1633 goto restart;
1634 }
1635 trace_xfs_alloc_size_neither(args);
1636 args->agbno = NULLAGBLOCK;
1637 goto out;
1638 }
1639
1640 alloc_finish:
1641 /* fix up btrees on a successful allocation */
1642 error = xfs_alloc_cur_finish(args, &acur);
1643
1644 out:
1645 xfs_alloc_cur_close(&acur, error);
1646 return error;
1647 }
1648
1649 /*
1650 * Allocate a variable extent anywhere in the allocation group agno.
1651 * Extent's length (returned in len) will be between minlen and maxlen,
1652 * and of the form k * prod + mod unless there's nothing that large.
1653 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1654 */
1655 STATIC int /* error */
1656 xfs_alloc_ag_vextent_size(
1657 xfs_alloc_arg_t *args) /* allocation argument structure */
1658 {
1659 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1660 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1661 int error; /* error result */
1662 xfs_agblock_t fbno; /* start of found freespace */
1663 xfs_extlen_t flen; /* length of found freespace */
1664 int i; /* temp status variable */
1665 xfs_agblock_t rbno; /* returned block number */
1666 xfs_extlen_t rlen; /* length of returned extent */
1667 bool busy;
1668 unsigned busy_gen;
1669
1670 restart:
1671 /*
1672 * Allocate and initialize a cursor for the by-size btree.
1673 */
1674 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1675 args->agno, XFS_BTNUM_CNT);
1676 bno_cur = NULL;
1677 busy = false;
1678
1679 /*
1680 * Look for an entry >= maxlen+alignment-1 blocks.
1681 */
1682 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1683 args->maxlen + args->alignment - 1, &i)))
1684 goto error0;
1685
1686 /*
1687 * If none then we have to settle for a smaller extent. In the case that
1688 * there are no large extents, this will return the last entry in the
1689 * tree unless the tree is empty. In the case that there are only busy
1690 * large extents, this will return the largest small extent unless there
1691 * are no smaller extents available.
1692 */
1693 if (!i) {
1694 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1695 &fbno, &flen, &i);
1696 if (error)
1697 goto error0;
1698 if (i == 0 || flen == 0) {
1699 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1700 trace_xfs_alloc_size_noentry(args);
1701 return 0;
1702 }
1703 ASSERT(i == 1);
1704 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1705 &rlen, &busy_gen);
1706 } else {
1707 /*
1708 * Search for a non-busy extent that is large enough.
1709 */
1710 for (;;) {
1711 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1712 if (error)
1713 goto error0;
1714 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1715 error = -EFSCORRUPTED;
1716 goto error0;
1717 }
1718
1719 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1720 &rbno, &rlen, &busy_gen);
1721
1722 if (rlen >= args->maxlen)
1723 break;
1724
1725 error = xfs_btree_increment(cnt_cur, 0, &i);
1726 if (error)
1727 goto error0;
1728 if (i == 0) {
1729 /*
1730 * Our only valid extents must have been busy.
1731 * Make it unbusy by forcing the log out and
1732 * retrying.
1733 */
1734 xfs_btree_del_cursor(cnt_cur,
1735 XFS_BTREE_NOERROR);
1736 trace_xfs_alloc_size_busy(args);
1737 xfs_extent_busy_flush(args->mp,
1738 args->pag, busy_gen);
1739 goto restart;
1740 }
1741 }
1742 }
1743
1744 /*
1745 * In the first case above, we got the last entry in the
1746 * by-size btree. Now we check to see if the space hits maxlen
1747 * once aligned; if not, we search left for something better.
1748 * This can't happen in the second case above.
1749 */
1750 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1751 if (XFS_IS_CORRUPT(args->mp,
1752 rlen != 0 &&
1753 (rlen > flen ||
1754 rbno + rlen > fbno + flen))) {
1755 error = -EFSCORRUPTED;
1756 goto error0;
1757 }
1758 if (rlen < args->maxlen) {
1759 xfs_agblock_t bestfbno;
1760 xfs_extlen_t bestflen;
1761 xfs_agblock_t bestrbno;
1762 xfs_extlen_t bestrlen;
1763
1764 bestrlen = rlen;
1765 bestrbno = rbno;
1766 bestflen = flen;
1767 bestfbno = fbno;
1768 for (;;) {
1769 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1770 goto error0;
1771 if (i == 0)
1772 break;
1773 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1774 &i)))
1775 goto error0;
1776 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1777 error = -EFSCORRUPTED;
1778 goto error0;
1779 }
1780 if (flen < bestrlen)
1781 break;
1782 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1783 &rbno, &rlen, &busy_gen);
1784 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1785 if (XFS_IS_CORRUPT(args->mp,
1786 rlen != 0 &&
1787 (rlen > flen ||
1788 rbno + rlen > fbno + flen))) {
1789 error = -EFSCORRUPTED;
1790 goto error0;
1791 }
1792 if (rlen > bestrlen) {
1793 bestrlen = rlen;
1794 bestrbno = rbno;
1795 bestflen = flen;
1796 bestfbno = fbno;
1797 if (rlen == args->maxlen)
1798 break;
1799 }
1800 }
1801 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1802 &i)))
1803 goto error0;
1804 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1805 error = -EFSCORRUPTED;
1806 goto error0;
1807 }
1808 rlen = bestrlen;
1809 rbno = bestrbno;
1810 flen = bestflen;
1811 fbno = bestfbno;
1812 }
1813 args->wasfromfl = 0;
1814 /*
1815 * Fix up the length.
1816 */
1817 args->len = rlen;
1818 if (rlen < args->minlen) {
1819 if (busy) {
1820 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1821 trace_xfs_alloc_size_busy(args);
1822 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1823 goto restart;
1824 }
1825 goto out_nominleft;
1826 }
1827 xfs_alloc_fix_len(args);
1828
1829 rlen = args->len;
1830 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1831 error = -EFSCORRUPTED;
1832 goto error0;
1833 }
1834 /*
1835 * Allocate and initialize a cursor for the by-block tree.
1836 */
1837 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1838 args->agno, XFS_BTNUM_BNO);
1839 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1840 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1841 goto error0;
1842 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1843 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1844 cnt_cur = bno_cur = NULL;
1845 args->len = rlen;
1846 args->agbno = rbno;
1847 if (XFS_IS_CORRUPT(args->mp,
1848 args->agbno + args->len >
1849 be32_to_cpu(
1850 XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
1851 error = -EFSCORRUPTED;
1852 goto error0;
1853 }
1854 trace_xfs_alloc_size_done(args);
1855 return 0;
1856
1857 error0:
1858 trace_xfs_alloc_size_error(args);
1859 if (cnt_cur)
1860 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1861 if (bno_cur)
1862 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1863 return error;
1864
1865 out_nominleft:
1866 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1867 trace_xfs_alloc_size_nominleft(args);
1868 args->agbno = NULLAGBLOCK;
1869 return 0;
1870 }
1871
1872 /*
1873 * Free the extent starting at agno/bno for length.
1874 */
1875 STATIC int
1876 xfs_free_ag_extent(
1877 struct xfs_trans *tp,
1878 struct xfs_buf *agbp,
1879 xfs_agnumber_t agno,
1880 xfs_agblock_t bno,
1881 xfs_extlen_t len,
1882 const struct xfs_owner_info *oinfo,
1883 enum xfs_ag_resv_type type)
1884 {
1885 struct xfs_mount *mp;
1886 struct xfs_perag *pag;
1887 struct xfs_btree_cur *bno_cur;
1888 struct xfs_btree_cur *cnt_cur;
1889 xfs_agblock_t gtbno; /* start of right neighbor */
1890 xfs_extlen_t gtlen; /* length of right neighbor */
1891 xfs_agblock_t ltbno; /* start of left neighbor */
1892 xfs_extlen_t ltlen; /* length of left neighbor */
1893 xfs_agblock_t nbno; /* new starting block of freesp */
1894 xfs_extlen_t nlen; /* new length of freespace */
1895 int haveleft; /* have a left neighbor */
1896 int haveright; /* have a right neighbor */
1897 int i;
1898 int error;
1899
1900 bno_cur = cnt_cur = NULL;
1901 mp = tp->t_mountp;
1902
1903 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1904 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1905 if (error)
1906 goto error0;
1907 }
1908
1909 /*
1910 * Allocate and initialize a cursor for the by-block btree.
1911 */
1912 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1913 /*
1914 * Look for a neighboring block on the left (lower block numbers)
1915 * that is contiguous with this space.
1916 */
1917 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1918 goto error0;
1919 if (haveleft) {
1920 /*
1921 * There is a block to our left.
1922 */
1923 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1924 goto error0;
1925 if (XFS_IS_CORRUPT(mp, i != 1)) {
1926 error = -EFSCORRUPTED;
1927 goto error0;
1928 }
1929 /*
1930 * It's not contiguous, though.
1931 */
1932 if (ltbno + ltlen < bno)
1933 haveleft = 0;
1934 else {
1935 /*
1936 * If this failure happens, the request to free this
1937 * space was invalid; it's (partly) already free.
1938 * Very bad.
1939 */
1940 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
1941 error = -EFSCORRUPTED;
1942 goto error0;
1943 }
1944 }
1945 }
1946 /*
1947 * Look for a neighboring block on the right (higher block numbers)
1948 * that is contiguous with this space.
1949 */
1950 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1951 goto error0;
1952 if (haveright) {
1953 /*
1954 * There is a block to our right.
1955 */
1956 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1957 goto error0;
1958 if (XFS_IS_CORRUPT(mp, i != 1)) {
1959 error = -EFSCORRUPTED;
1960 goto error0;
1961 }
1962 /*
1963 * It's not contiguous, though.
1964 */
1965 if (bno + len < gtbno)
1966 haveright = 0;
1967 else {
1968 /*
1969 * If this failure happens, the request to free this
1970 * space was invalid; it's (partly) already free.
1971 * Very bad.
1972 */
1973 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
1974 error = -EFSCORRUPTED;
1975 goto error0;
1976 }
1977 }
1978 }
1979 /*
1980 * Now allocate and initialize a cursor for the by-size tree.
1981 */
1982 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1983 /*
1984 * Have both left and right contiguous neighbors.
1985 * Merge all three into a single free block.
1986 */
1987 if (haveleft && haveright) {
1988 /*
1989 * Delete the old by-size entry on the left.
1990 */
1991 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1992 goto error0;
1993 if (XFS_IS_CORRUPT(mp, i != 1)) {
1994 error = -EFSCORRUPTED;
1995 goto error0;
1996 }
1997 if ((error = xfs_btree_delete(cnt_cur, &i)))
1998 goto error0;
1999 if (XFS_IS_CORRUPT(mp, i != 1)) {
2000 error = -EFSCORRUPTED;
2001 goto error0;
2002 }
2003 /*
2004 * Delete the old by-size entry on the right.
2005 */
2006 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2007 goto error0;
2008 if (XFS_IS_CORRUPT(mp, i != 1)) {
2009 error = -EFSCORRUPTED;
2010 goto error0;
2011 }
2012 if ((error = xfs_btree_delete(cnt_cur, &i)))
2013 goto error0;
2014 if (XFS_IS_CORRUPT(mp, i != 1)) {
2015 error = -EFSCORRUPTED;
2016 goto error0;
2017 }
2018 /*
2019 * Delete the old by-block entry for the right block.
2020 */
2021 if ((error = xfs_btree_delete(bno_cur, &i)))
2022 goto error0;
2023 if (XFS_IS_CORRUPT(mp, i != 1)) {
2024 error = -EFSCORRUPTED;
2025 goto error0;
2026 }
2027 /*
2028 * Move the by-block cursor back to the left neighbor.
2029 */
2030 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2031 goto error0;
2032 if (XFS_IS_CORRUPT(mp, i != 1)) {
2033 error = -EFSCORRUPTED;
2034 goto error0;
2035 }
2036 #ifdef DEBUG
2037 /*
2038 * Check that this is the right record: delete didn't
2039 * mangle the cursor.
2040 */
2041 {
2042 xfs_agblock_t xxbno;
2043 xfs_extlen_t xxlen;
2044
2045 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2046 &i)))
2047 goto error0;
2048 if (XFS_IS_CORRUPT(mp,
2049 i != 1 ||
2050 xxbno != ltbno ||
2051 xxlen != ltlen)) {
2052 error = -EFSCORRUPTED;
2053 goto error0;
2054 }
2055 }
2056 #endif
2057 /*
2058 * Update remaining by-block entry to the new, joined block.
2059 */
2060 nbno = ltbno;
2061 nlen = len + ltlen + gtlen;
2062 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2063 goto error0;
2064 }
2065 /*
2066 * Have only a left contiguous neighbor.
2067 * Merge it together with the new freespace.
2068 */
2069 else if (haveleft) {
2070 /*
2071 * Delete the old by-size entry on the left.
2072 */
2073 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2074 goto error0;
2075 if (XFS_IS_CORRUPT(mp, i != 1)) {
2076 error = -EFSCORRUPTED;
2077 goto error0;
2078 }
2079 if ((error = xfs_btree_delete(cnt_cur, &i)))
2080 goto error0;
2081 if (XFS_IS_CORRUPT(mp, i != 1)) {
2082 error = -EFSCORRUPTED;
2083 goto error0;
2084 }
2085 /*
2086 * Back up the by-block cursor to the left neighbor, and
2087 * update its length.
2088 */
2089 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2090 goto error0;
2091 if (XFS_IS_CORRUPT(mp, i != 1)) {
2092 error = -EFSCORRUPTED;
2093 goto error0;
2094 }
2095 nbno = ltbno;
2096 nlen = len + ltlen;
2097 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2098 goto error0;
2099 }
2100 /*
2101 * Have only a right contiguous neighbor.
2102 * Merge it together with the new freespace.
2103 */
2104 else if (haveright) {
2105 /*
2106 * Delete the old by-size entry on the right.
2107 */
2108 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2109 goto error0;
2110 if (XFS_IS_CORRUPT(mp, i != 1)) {
2111 error = -EFSCORRUPTED;
2112 goto error0;
2113 }
2114 if ((error = xfs_btree_delete(cnt_cur, &i)))
2115 goto error0;
2116 if (XFS_IS_CORRUPT(mp, i != 1)) {
2117 error = -EFSCORRUPTED;
2118 goto error0;
2119 }
2120 /*
2121 * Update the starting block and length of the right
2122 * neighbor in the by-block tree.
2123 */
2124 nbno = bno;
2125 nlen = len + gtlen;
2126 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2127 goto error0;
2128 }
2129 /*
2130 * No contiguous neighbors.
2131 * Insert the new freespace into the by-block tree.
2132 */
2133 else {
2134 nbno = bno;
2135 nlen = len;
2136 if ((error = xfs_btree_insert(bno_cur, &i)))
2137 goto error0;
2138 if (XFS_IS_CORRUPT(mp, i != 1)) {
2139 error = -EFSCORRUPTED;
2140 goto error0;
2141 }
2142 }
2143 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2144 bno_cur = NULL;
2145 /*
2146 * In all cases we need to insert the new freespace in the by-size tree.
2147 */
2148 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2149 goto error0;
2150 if (XFS_IS_CORRUPT(mp, i != 0)) {
2151 error = -EFSCORRUPTED;
2152 goto error0;
2153 }
2154 if ((error = xfs_btree_insert(cnt_cur, &i)))
2155 goto error0;
2156 if (XFS_IS_CORRUPT(mp, i != 1)) {
2157 error = -EFSCORRUPTED;
2158 goto error0;
2159 }
2160 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2161 cnt_cur = NULL;
2162
2163 /*
2164 * Update the freespace totals in the ag and superblock.
2165 */
2166 pag = xfs_perag_get(mp, agno);
2167 error = xfs_alloc_update_counters(tp, pag, agbp, len);
2168 xfs_ag_resv_free_extent(pag, type, tp, len);
2169 xfs_perag_put(pag);
2170 if (error)
2171 goto error0;
2172
2173 XFS_STATS_INC(mp, xs_freex);
2174 XFS_STATS_ADD(mp, xs_freeb, len);
2175
2176 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2177
2178 return 0;
2179
2180 error0:
2181 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2182 if (bno_cur)
2183 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2184 if (cnt_cur)
2185 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2186 return error;
2187 }
2188
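/*
 * Illustrative sketch (not part of the original source): the merge
 * arithmetic xfs_free_ag_extent() applies above once contiguity with
 * the left (ltbno/ltlen) and right (gtbno/gtlen) neighbors has been
 * established. The helper restates the four cases over plain integers;
 * the name is hypothetical.
 */
static void
example_merge_freespace(
	unsigned int	bno,		/* extent being freed */
	unsigned int	len,
	int		haveleft,	/* ltbno + ltlen == bno */
	unsigned int	ltbno,
	unsigned int	ltlen,
	int		haveright,	/* bno + len == gtbno */
	unsigned int	gtlen,
	unsigned int	*nbno,		/* resulting merged extent */
	unsigned int	*nlen)
{
	if (haveleft && haveright) {
		*nbno = ltbno;			/* absorb both neighbors */
		*nlen = ltlen + len + gtlen;
	} else if (haveleft) {
		*nbno = ltbno;			/* extend the left neighbor */
		*nlen = ltlen + len;
	} else if (haveright) {
		*nbno = bno;			/* extend into the right one */
		*nlen = len + gtlen;
	} else {
		*nbno = bno;			/* standalone free extent */
		*nlen = len;
	}
}
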
2189 /*
2190 * Visible (exported) allocation/free functions.
2191 * Some of these are used just by xfs_alloc_btree.c and this file.
2192 */
2193
2194 /*
2195 * Compute and fill in value of m_ag_maxlevels.
2196 */
2197 void
2198 xfs_alloc_compute_maxlevels(
2199 xfs_mount_t *mp) /* file system mount structure */
2200 {
2201 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2202 (mp->m_sb.sb_agblocks + 1) / 2);
2203 }
2204
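/*
 * Illustrative sketch (not part of the original source): one plausible
 * shape of the computation behind xfs_btree_compute_maxlevels() as used
 * above, assuming limits[0] is the minimum records per leaf block and
 * limits[1] the minimum records per node block. Worst case, every block
 * is minimally full, so each level divides the block count by the
 * corresponding minimum until a single root block remains.
 */
static unsigned int
example_compute_maxlevels(
	const unsigned int	limits[2],
	unsigned long		len)	/* maximum record count */
{
	unsigned long		maxblocks;
	unsigned int		level;

	/* leaf level: blocks needed to hold all records */
	maxblocks = (len + limits[0] - 1) / limits[0];
	/* interior levels: divide until one root block remains */
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + limits[1] - 1) / limits[1];
	return level;
}
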
2205 /*
2206 * Find the length of the longest extent in an AG. The 'need' parameter
2207 * specifies how much space we're going to need for the AGFL and the
2208 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2209 * other callers.
2210 */
2211 xfs_extlen_t
2212 xfs_alloc_longest_free_extent(
2213 struct xfs_perag *pag,
2214 xfs_extlen_t need,
2215 xfs_extlen_t reserved)
2216 {
2217 xfs_extlen_t delta = 0;
2218
2219 /*
2220 * If the AGFL needs a recharge, we'll have to subtract that from the
2221 * longest extent.
2222 */
2223 if (need > pag->pagf_flcount)
2224 delta = need - pag->pagf_flcount;
2225
2226 /*
2227 * If we cannot maintain others' reservations with space from the
2228 * not-longest freesp extents, we'll have to subtract /that/ from
2229 * the longest extent too.
2230 */
2231 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2232 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2233
2234 /*
2235 * If the longest extent is long enough to satisfy all the
2236 * reservations and AGFL rules in place, we can return this extent.
2237 */
2238 if (pag->pagf_longest > delta)
2239 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2240 pag->pagf_longest - delta);
2241
2242 /* Otherwise, let the caller try for 1 block if there's space. */
2243 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2244 }
2245
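/*
 * Worked example (illustrative, not part of the original source) for
 * the delta logic in xfs_alloc_longest_free_extent() above. Suppose
 * pagf_longest = 100, pagf_flcount = 2, pagf_freeblks = 150,
 * need = 6 and reserved = 60:
 *
 *   delta  = need - flcount         = 6 - 2         = 4
 *   short  = freeblks - longest     = 150 - 100     = 50, which is
 *            less than reserved, so
 *   delta += reserved - short       = 4 + (60 - 50) = 14
 *   result = min(m_ag_max_usable, longest - delta)
 *          = min(m_ag_max_usable, 86)
 *
 * Had pagf_longest been <= delta, the function would instead return 1
 * when any free space exists at all, and 0 otherwise.
 */
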
2246 unsigned int
2247 xfs_alloc_min_freelist(
2248 struct xfs_mount *mp,
2249 struct xfs_perag *pag)
2250 {
2251 unsigned int min_free;
2252
2253 /* space needed by-bno freespace btree */
2254 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2255 mp->m_ag_maxlevels);
2256 /* space needed by-size freespace btree */
2257 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2258 mp->m_ag_maxlevels);
2259 /* space needed reverse mapping used space btree */
2260 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2261 min_free += min_t(unsigned int,
2262 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2263 mp->m_rmap_maxlevels);
2264
2265 return min_free;
2266 }
2267
2268 /*
2269 * Check if the operation we are fixing up the freelist for should go ahead or
2270 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2271 * is dependent on whether the size and shape of free space available will
2272 * permit the requested allocation to take place.
2273 */
2274 static bool
2275 xfs_alloc_space_available(
2276 struct xfs_alloc_arg *args,
2277 xfs_extlen_t min_free,
2278 int flags)
2279 {
2280 struct xfs_perag *pag = args->pag;
2281 xfs_extlen_t alloc_len, longest;
2282 xfs_extlen_t reservation; /* blocks that are still reserved */
2283 int available;
2284 xfs_extlen_t agflcount;
2285
2286 if (flags & XFS_ALLOC_FLAG_FREEING)
2287 return true;
2288
2289 reservation = xfs_ag_resv_needed(pag, args->resv);
2290
2291 /* do we have enough contiguous free space for the allocation? */
2292 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2293 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2294 if (longest < alloc_len)
2295 return false;
2296
2297 /*
2298 * Do we have enough free space remaining for the allocation? Don't
2299 * account extra agfl blocks because we are about to defer freeing them,
2300 * making them unavailable until the current transaction commits.
2301 */
2302 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2303 available = (int)(pag->pagf_freeblks + agflcount -
2304 reservation - min_free - args->minleft);
2305 if (available < (int)max(args->total, alloc_len))
2306 return false;
2307
2308 /*
2309 * Clamp maxlen to the amount of free space available for the actual
2310 * extent allocation.
2311 */
2312 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2313 args->maxlen = available;
2314 ASSERT(args->maxlen > 0);
2315 ASSERT(args->maxlen >= args->minlen);
2316 }
2317
2318 return true;
2319 }
2320
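/*
 * Worked example (illustrative, not part of the original source) for
 * the headroom check in xfs_alloc_space_available() above. Suppose
 * pagf_freeblks = 100, pagf_flcount = 4, min_free = 6,
 * reservation = 20, args->minleft = 10, args->total = 40 and
 * alloc_len = 32:
 *
 *   agflcount = min(flcount, min_free) = 4
 *   available = freeblks + agflcount - reservation - min_free - minleft
 *             = 100 + 4 - 20 - 6 - 10 = 68
 *
 * 68 >= max(total, alloc_len) = 40, so the allocation may proceed, and
 * args->maxlen would then be clamped to 68 if it were larger.
 */
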
2321 int
2322 xfs_free_agfl_block(
2323 struct xfs_trans *tp,
2324 xfs_agnumber_t agno,
2325 xfs_agblock_t agbno,
2326 struct xfs_buf *agbp,
2327 struct xfs_owner_info *oinfo)
2328 {
2329 int error;
2330 struct xfs_buf *bp;
2331
2332 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2333 XFS_AG_RESV_AGFL);
2334 if (error)
2335 return error;
2336
2337 bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
2338 if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
2339 return -EFSCORRUPTED;
2340 xfs_trans_binval(tp, bp);
2341
2342 return 0;
2343 }
2344
2345 /*
2346 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2347 * is to detect an agfl header padding mismatch between current and early v5
2348 * kernels. This problem manifests as a 1-slot size difference between the
2349 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2350 * may also catch variants of agfl count corruption unrelated to padding. Either
2351 * way, we'll reset the agfl and warn the user.
2352 *
2353 * Return true if a reset is required before the agfl can be used, false
2354 * otherwise.
2355 */
2356 static bool
2357 xfs_agfl_needs_reset(
2358 struct xfs_mount *mp,
2359 struct xfs_agf *agf)
2360 {
2361 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2362 uint32_t l = be32_to_cpu(agf->agf_fllast);
2363 uint32_t c = be32_to_cpu(agf->agf_flcount);
2364 int agfl_size = xfs_agfl_size(mp);
2365 int active;
2366
2367 /* no agfl header on v4 supers */
2368 if (!xfs_sb_version_hascrc(&mp->m_sb))
2369 return false;
2370
2371 /*
2372 * The agf read verifier catches severe corruption of these fields.
2373 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2374 * the verifier allows it.
2375 */
2376 if (f >= agfl_size || l >= agfl_size)
2377 return true;
2378 if (c > agfl_size)
2379 return true;
2380
2381 /*
2382 * Check consistency between the on-disk count and the active range. An
2383 * agfl padding mismatch manifests as an inconsistent flcount.
2384 */
2385 if (c && l >= f)
2386 active = l - f + 1;
2387 else if (c)
2388 active = agfl_size - f + l + 1;
2389 else
2390 active = 0;
2391
2392 return active != c;
2393 }
2394
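/*
 * Illustrative sketch (not part of the original source): the wrapped
 * active-range computation from xfs_agfl_needs_reset() above, restated
 * over plain integers. The AGFL is a circular buffer of 'size' slots
 * whose active entries span the inclusive range [first, last].
 */
static unsigned int
example_agfl_active_slots(
	unsigned int	first,
	unsigned int	last,
	unsigned int	count,		/* on-disk flcount */
	unsigned int	size)
{
	if (count == 0)
		return 0;			/* empty freelist */
	if (last >= first)
		return last - first + 1;	/* contiguous range */
	return size - first + last + 1;		/* range wraps at the end */
}
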
2395 /*
2396 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2397 * agfl content cannot be trusted. Warn the user that a repair is required to
2398 * recover leaked blocks.
2399 *
2400 * The purpose of this mechanism is to handle filesystems affected by the agfl
2401 * header padding mismatch problem. A reset keeps the filesystem online with a
2402 * relatively minor free space accounting inconsistency rather than suffering the
2403 * inevitable crash from use of an invalid agfl block.
2404 */
2405 static void
2406 xfs_agfl_reset(
2407 struct xfs_trans *tp,
2408 struct xfs_buf *agbp,
2409 struct xfs_perag *pag)
2410 {
2411 struct xfs_mount *mp = tp->t_mountp;
2412 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
2413
2414 ASSERT(pag->pagf_agflreset);
2415 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2416
2417 xfs_warn(mp,
2418 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2419 "Please unmount and run xfs_repair.",
2420 pag->pag_agno, pag->pagf_flcount);
2421
2422 agf->agf_flfirst = 0;
2423 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2424 agf->agf_flcount = 0;
2425 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2426 XFS_AGF_FLCOUNT);
2427
2428 pag->pagf_flcount = 0;
2429 pag->pagf_agflreset = false;
2430 }
2431
2432 /*
2433 * Defer an AGFL block free. This is effectively equivalent to
2434 * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
2435 *
2436 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2437 * allocation operations in a transaction. AGFL frees are prone to this problem
2438 * because for one they are always freed one at a time. Further, an immediate
2439 * AGFL block free can cause a btree join and require another block free before
2440 * the real allocation can proceed. Deferring the free disconnects freeing up
2441 * the AGFL slot from freeing the block.
2442 */
2443 STATIC void
2444 xfs_defer_agfl_block(
2445 struct xfs_trans *tp,
2446 xfs_agnumber_t agno,
2447 xfs_fsblock_t agbno,
2448 struct xfs_owner_info *oinfo)
2449 {
2450 struct xfs_mount *mp = tp->t_mountp;
2451 struct xfs_extent_free_item *new; /* new element */
2452
2453 ASSERT(xfs_bmap_free_item_zone != NULL);
2454 ASSERT(oinfo != NULL);
2455
2456 new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
2457 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2458 new->xefi_blockcount = 1;
2459 new->xefi_oinfo = *oinfo;
2460
2461 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2462
2463 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2464 }
2465
2466 /*
2467 * Decide whether to use this allocation group for this allocation.
2468 * If so, fix up the btree freelist's size.
2469 */
2470 int /* error */
2471 xfs_alloc_fix_freelist(
2472 struct xfs_alloc_arg *args, /* allocation argument structure */
2473 int flags) /* XFS_ALLOC_FLAG_... */
2474 {
2475 struct xfs_mount *mp = args->mp;
2476 struct xfs_perag *pag = args->pag;
2477 struct xfs_trans *tp = args->tp;
2478 struct xfs_buf *agbp = NULL;
2479 struct xfs_buf *agflbp = NULL;
2480 struct xfs_alloc_arg targs; /* local allocation arguments */
2481 xfs_agblock_t bno; /* freelist block */
2482 xfs_extlen_t need; /* total blocks needed in freelist */
2483 int error = 0;
2484
2485 /* deferred ops (AGFL block frees) require permanent transactions */
2486 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2487
2488 if (!pag->pagf_init) {
2489 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2490 if (error)
2491 goto out_no_agbp;
2492 if (!pag->pagf_init) {
2493 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2494 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2495 goto out_agbp_relse;
2496 }
2497 }
2498
2499 /*
2500 * If this is a metadata-preferred pag and we are allocating user
2501 * data, then try somewhere else unless we are being asked to try
2502 * harder at this point.
2503 */
2504 if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
2505 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2506 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2507 goto out_agbp_relse;
2508 }
2509
2510 need = xfs_alloc_min_freelist(mp, pag);
2511 if (!xfs_alloc_space_available(args, need, flags |
2512 XFS_ALLOC_FLAG_CHECK))
2513 goto out_agbp_relse;
2514
2515 /*
2516 * Get the a.g. freespace buffer.
2517 * Can fail if we're not blocking on locks, and it's held.
2518 */
2519 if (!agbp) {
2520 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2521 if (error)
2522 goto out_no_agbp;
2523 if (!agbp) {
2524 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2525 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2526 goto out_no_agbp;
2527 }
2528 }
2529
2530 /* reset a padding mismatched agfl before final free space check */
2531 if (pag->pagf_agflreset)
2532 xfs_agfl_reset(tp, agbp, pag);
2533
2534 /* If there isn't enough total space or a large enough single extent, reject it. */
2535 need = xfs_alloc_min_freelist(mp, pag);
2536 if (!xfs_alloc_space_available(args, need, flags))
2537 goto out_agbp_relse;
2538
2539 /*
2540 * Make the freelist shorter if it's too long.
2541 *
2542 * Note that from this point onwards, we will always release the agf and
2543 * agfl buffers on error. This handles the case where we error out and
2544 * the buffers are clean or may not have been joined to the transaction
2545 * and hence need to be released manually. If they have been joined to
2546 * the transaction, then xfs_trans_brelse() will handle them
2547 * appropriately based on the recursion count and dirty state of the
2548 * buffer.
2549 *
2550 * XXX (dgc): When we have lots of free space, does this buy us
2551 * anything other than extra overhead when we need to put more blocks
2552 * back on the free list? Maybe we should only do this when space is
2553 * getting low or the AGFL is more than half full?
2554 *
2555 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2556 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2557 * updating the rmapbt. Both flags are used in xfs_repair while we're
2558 * rebuilding the rmapbt, and neither are used by the kernel. They're
2559 * both required to ensure that rmaps are correctly recorded for the
2560 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2561 * repair/rmap.c in xfsprogs for details.
2562 */
2563 memset(&targs, 0, sizeof(targs));
2564 /* struct copy below */
2565 if (flags & XFS_ALLOC_FLAG_NORMAP)
2566 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2567 else
2568 targs.oinfo = XFS_RMAP_OINFO_AG;
2569 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2570 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2571 if (error)
2572 goto out_agbp_relse;
2573
2574 /* defer agfl frees */
2575 xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2576 }
2577
2578 targs.tp = tp;
2579 targs.mp = mp;
2580 targs.agbp = agbp;
2581 targs.agno = args->agno;
2582 targs.alignment = targs.minlen = targs.prod = 1;
2583 targs.type = XFS_ALLOCTYPE_THIS_AG;
2584 targs.pag = pag;
2585 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2586 if (error)
2587 goto out_agbp_relse;
2588
2589 /* Make the freelist longer if it's too short. */
2590 while (pag->pagf_flcount < need) {
2591 targs.agbno = 0;
2592 targs.maxlen = need - pag->pagf_flcount;
2593 targs.resv = XFS_AG_RESV_AGFL;
2594
2595 /* Allocate as many blocks as possible at once. */
2596 error = xfs_alloc_ag_vextent(&targs);
2597 if (error)
2598 goto out_agflbp_relse;
2599
2600 /*
2601 * Stop if we run out. Won't happen if callers are obeying
2602 * the restrictions correctly. Can happen for free calls
2603 * on a completely full ag.
2604 */
2605 if (targs.agbno == NULLAGBLOCK) {
2606 if (flags & XFS_ALLOC_FLAG_FREEING)
2607 break;
2608 goto out_agflbp_relse;
2609 }
2610 /*
2611 * Put each allocated block on the list.
2612 */
2613 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2614 error = xfs_alloc_put_freelist(tp, agbp,
2615 agflbp, bno, 0);
2616 if (error)
2617 goto out_agflbp_relse;
2618 }
2619 }
2620 xfs_trans_brelse(tp, agflbp);
2621 args->agbp = agbp;
2622 return 0;
2623
2624 out_agflbp_relse:
2625 xfs_trans_brelse(tp, agflbp);
2626 out_agbp_relse:
2627 if (agbp)
2628 xfs_trans_brelse(tp, agbp);
2629 out_no_agbp:
2630 args->agbp = NULL;
2631 return error;
2632 }
2633
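/*
 * Illustrative sketch (not part of the original source): the balancing
 * strategy of xfs_alloc_fix_freelist() above, restated over a plain
 * counter. The callbacks are hypothetical stand-ins: one defers the
 * free of a single surplus block, the other allocates up to 'want'
 * blocks and reports how many it actually got.
 */
static int
example_balance_freelist(
	unsigned int	*flcount,	/* current freelist population */
	unsigned int	need,		/* target population */
	int		(*defer_free_one)(void *priv),
	int		(*alloc_some)(void *priv, unsigned int want,
				      unsigned int *got),
	void		*priv)
{
	unsigned int	got;
	int		error;

	/* shrink: release one slot at a time, the free is deferred */
	while (*flcount > need) {
		error = defer_free_one(priv);
		if (error)
			return error;
		(*flcount)--;
	}
	/* grow: allocate as many blocks as possible per call */
	while (*flcount < need) {
		error = alloc_some(priv, need - *flcount, &got);
		if (error)
			return error;
		if (got == 0)
			break;		/* AG exhausted; caller decides */
		*flcount += got;
	}
	return 0;
}
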
2634 /*
2635 * Get a block from the freelist.
2636 * Returns with the buffer for the block gotten.
2637 */
2638 int /* error */
2639 xfs_alloc_get_freelist(
2640 xfs_trans_t *tp, /* transaction pointer */
2641 xfs_buf_t *agbp, /* buffer containing the agf structure */
2642 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2643 int btreeblk) /* destination is an AGF btree */
2644 {
2645 xfs_agf_t *agf; /* a.g. freespace structure */
2646 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2647 xfs_agblock_t bno; /* block number returned */
2648 __be32 *agfl_bno;
2649 int error;
2650 int logflags;
2651 xfs_mount_t *mp = tp->t_mountp;
2652 xfs_perag_t *pag; /* per allocation group data */
2653
2654 /*
2655 * Freelist is empty, give up.
2656 */
2657 agf = XFS_BUF_TO_AGF(agbp);
2658 if (!agf->agf_flcount) {
2659 *bnop = NULLAGBLOCK;
2660 return 0;
2661 }
2662 /*
2663 * Read the array of free blocks.
2664 */
2665 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2666 &agflbp);
2667 if (error)
2668 return error;
2669
2671 /*
2672 * Get the block number and update the data structures.
2673 */
2674 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2675 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2676 be32_add_cpu(&agf->agf_flfirst, 1);
2677 xfs_trans_brelse(tp, agflbp);
2678 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2679 agf->agf_flfirst = 0;
2680
2681 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2682 ASSERT(!pag->pagf_agflreset);
2683 be32_add_cpu(&agf->agf_flcount, -1);
2684 xfs_trans_agflist_delta(tp, -1);
2685 pag->pagf_flcount--;
2686
2687 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2688 if (btreeblk) {
2689 be32_add_cpu(&agf->agf_btreeblks, 1);
2690 pag->pagf_btreeblks++;
2691 logflags |= XFS_AGF_BTREEBLKS;
2692 }
2693 xfs_perag_put(pag);
2694
2695 xfs_alloc_log_agf(tp, agbp, logflags);
2696 *bnop = bno;
2697
2698 return 0;
2699 }
2700
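/*
 * Illustrative sketch (not part of the original source): the circular
 * pop-front performed by xfs_alloc_get_freelist() above, restated over
 * a plain array. 'first' indexes the oldest entry and wraps at 'size';
 * the caller has already checked that the list is not empty.
 */
static unsigned int
example_agfl_pop(
	const unsigned int	*list,
	unsigned int		*first,
	unsigned int		*count,
	unsigned int		size)
{
	unsigned int		bno = list[*first];

	if (++(*first) == size)
		*first = 0;		/* wrap to the start of the array */
	(*count)--;
	return bno;
}
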
2701 /*
2702 * Log the given fields from the agf structure.
2703 */
2704 void
2705 xfs_alloc_log_agf(
2706 xfs_trans_t *tp, /* transaction pointer */
2707 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2708 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2709 {
2710 int first; /* first byte offset */
2711 int last; /* last byte offset */
2712 static const short offsets[] = {
2713 offsetof(xfs_agf_t, agf_magicnum),
2714 offsetof(xfs_agf_t, agf_versionnum),
2715 offsetof(xfs_agf_t, agf_seqno),
2716 offsetof(xfs_agf_t, agf_length),
2717 offsetof(xfs_agf_t, agf_roots[0]),
2718 offsetof(xfs_agf_t, agf_levels[0]),
2719 offsetof(xfs_agf_t, agf_flfirst),
2720 offsetof(xfs_agf_t, agf_fllast),
2721 offsetof(xfs_agf_t, agf_flcount),
2722 offsetof(xfs_agf_t, agf_freeblks),
2723 offsetof(xfs_agf_t, agf_longest),
2724 offsetof(xfs_agf_t, agf_btreeblks),
2725 offsetof(xfs_agf_t, agf_uuid),
2726 offsetof(xfs_agf_t, agf_rmap_blocks),
2727 offsetof(xfs_agf_t, agf_refcount_blocks),
2728 offsetof(xfs_agf_t, agf_refcount_root),
2729 offsetof(xfs_agf_t, agf_refcount_level),
2730 /* needed so that we don't log the whole rest of the structure: */
2731 offsetof(xfs_agf_t, agf_spare64),
2732 sizeof(xfs_agf_t)
2733 };
2734
2735 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2736
2737 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2738
2739 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2740 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2741 }
2742
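/*
 * Illustrative sketch (not part of the original source): how a field
 * bitmask plus the offsets[] table above can be turned into a byte
 * range for logging, which is what xfs_btree_offsets() is expected to
 * do here. The lowest set bit selects the first byte of the range; the
 * highest set bit selects the field whose end (the next table entry
 * minus one) is the last byte.
 */
static void
example_fields_to_byterange(
	unsigned int	fields,		/* bitmask of fields to log */
	const short	*offsets,	/* nbits + 1 entries, last is sizeof */
	int		nbits,
	int		*first,
	int		*last)
{
	int		i;

	for (i = 0; i < nbits; i++)
		if (fields & (1U << i)) {
			*first = offsets[i];	/* start of lowest field */
			break;
		}
	for (i = nbits - 1; i >= 0; i--)
		if (fields & (1U << i)) {
			*last = offsets[i + 1] - 1; /* end of highest field */
			break;
		}
}
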
2743 /*
2744 * Interface for inode allocation to force the pag data to be initialized.
2745 */
2746 int /* error */
2747 xfs_alloc_pagf_init(
2748 xfs_mount_t *mp, /* file system mount structure */
2749 xfs_trans_t *tp, /* transaction pointer */
2750 xfs_agnumber_t agno, /* allocation group number */
2751 int flags) /* XFS_ALLOC_FLAGS_... */
2752 {
2753 xfs_buf_t *bp;
2754 int error;
2755
2756 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2757 return error;
2758 if (bp)
2759 xfs_trans_brelse(tp, bp);
2760 return 0;
2761 }
2762
2763 /*
2764 * Put the block on the freelist for the allocation group.
2765 */
2766 int /* error */
2767 xfs_alloc_put_freelist(
2768 xfs_trans_t *tp, /* transaction pointer */
2769 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2770 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2771 xfs_agblock_t bno, /* block being freed */
2772 int btreeblk) /* block came from an AGF btree */
2773 {
2774 xfs_agf_t *agf; /* a.g. freespace structure */
2775 __be32 *blockp;/* pointer to array entry */
2776 int error;
2777 int logflags;
2778 xfs_mount_t *mp; /* mount structure */
2779 xfs_perag_t *pag; /* per allocation group data */
2780 __be32 *agfl_bno;
2781 int startoff;
2782
2783 agf = XFS_BUF_TO_AGF(agbp);
2784 mp = tp->t_mountp;
2785
2786 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2787 be32_to_cpu(agf->agf_seqno), &agflbp)))
2788 return error;
2789 be32_add_cpu(&agf->agf_fllast, 1);
2790 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2791 agf->agf_fllast = 0;
2792
2793 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2794 ASSERT(!pag->pagf_agflreset);
2795 be32_add_cpu(&agf->agf_flcount, 1);
2796 xfs_trans_agflist_delta(tp, 1);
2797 pag->pagf_flcount++;
2798
2799 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2800 if (btreeblk) {
2801 be32_add_cpu(&agf->agf_btreeblks, -1);
2802 pag->pagf_btreeblks--;
2803 logflags |= XFS_AGF_BTREEBLKS;
2804 }
2805 xfs_perag_put(pag);
2806
2807 xfs_alloc_log_agf(tp, agbp, logflags);
2808
2809 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2810
2811 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2812 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2813 *blockp = cpu_to_be32(bno);
2814 startoff = (char *)blockp - (char *)agflbp->b_addr;
2815
2816 xfs_alloc_log_agf(tp, agbp, logflags);
2817
2818 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2819 xfs_trans_log_buf(tp, agflbp, startoff,
2820 startoff + sizeof(xfs_agblock_t) - 1);
2821 return 0;
2822 }
2823
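/*
 * Illustrative sketch (not part of the original source): the circular
 * push-back performed by xfs_alloc_put_freelist() above, the mirror
 * image of the pop sketch earlier. 'last' indexes the newest entry and
 * wraps at 'size'; the caller guarantees the list is not already full.
 */
static void
example_agfl_push(
	unsigned int	*list,
	unsigned int	*last,
	unsigned int	*count,
	unsigned int	size,
	unsigned int	bno)
{
	if (++(*last) == size)
		*last = 0;		/* wrap to the start of the array */
	list[*last] = bno;
	(*count)++;
}
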
2824 static xfs_failaddr_t
2825 xfs_agf_verify(
2826 struct xfs_buf *bp)
2827 {
2828 struct xfs_mount *mp = bp->b_mount;
2829 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
2830
2831 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2832 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2833 return __this_address;
2834 if (!xfs_log_check_lsn(mp,
2835 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2836 return __this_address;
2837 }
2838
2839 if (!xfs_verify_magic(bp, agf->agf_magicnum))
2840 return __this_address;
2841
2842 if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2843 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2844 be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2845 be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2846 be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2847 return __this_address;
2848
2849 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2850 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2851 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2852 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2853 return __this_address;
2854
2855 if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2856 (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2857 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2858 return __this_address;
2859
2860 /*
2861 * During growfs operations, the perag is not fully initialised,
2862 * so we can't use it for any useful checking. growfs ensures we can't
2863 * use it by using uncached buffers that don't have the perag attached,
2864 * so we can detect and avoid this problem.
2865 */
2866 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2867 return __this_address;
2868
2869 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2870 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2871 return __this_address;
2872
2873 if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2874 (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2875 be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2876 return __this_address;
2877
2878 return NULL;
2880 }
2881
2882 static void
2883 xfs_agf_read_verify(
2884 struct xfs_buf *bp)
2885 {
2886 struct xfs_mount *mp = bp->b_mount;
2887 xfs_failaddr_t fa;
2888
2889 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2890 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2891 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2892 else {
2893 fa = xfs_agf_verify(bp);
2894 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2895 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2896 }
2897 }
2898
2899 static void
2900 xfs_agf_write_verify(
2901 struct xfs_buf *bp)
2902 {
2903 struct xfs_mount *mp = bp->b_mount;
2904 struct xfs_buf_log_item *bip = bp->b_log_item;
2905 xfs_failaddr_t fa;
2906
2907 fa = xfs_agf_verify(bp);
2908 if (fa) {
2909 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2910 return;
2911 }
2912
2913 if (!xfs_sb_version_hascrc(&mp->m_sb))
2914 return;
2915
2916 if (bip)
2917 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2918
2919 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2920 }
2921
2922 const struct xfs_buf_ops xfs_agf_buf_ops = {
2923 .name = "xfs_agf",
2924 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
2925 .verify_read = xfs_agf_read_verify,
2926 .verify_write = xfs_agf_write_verify,
2927 .verify_struct = xfs_agf_verify,
2928 };
2929
2930 /*
2931 * Read in the allocation group header (free/alloc section).
2932 */
2933 int /* error */
2934 xfs_read_agf(
2935 struct xfs_mount *mp, /* mount point structure */
2936 struct xfs_trans *tp, /* transaction pointer */
2937 xfs_agnumber_t agno, /* allocation group number */
2938 int flags, /* XFS_BUF_ */
2939 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2940 {
2941 int error;
2942
2943 trace_xfs_read_agf(mp, agno);
2944
2945 ASSERT(agno != NULLAGNUMBER);
2946 error = xfs_trans_read_buf(
2947 mp, tp, mp->m_ddev_targp,
2948 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2949 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2950 if (error)
2951 return error;
2952 if (!*bpp)
2953 return 0;
2954
2955 ASSERT(!(*bpp)->b_error);
2956 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2957 return 0;
2958 }
2959
2960 /*
2961 * Read in the allocation group header (free/alloc section).
2962 */
2963 int /* error */
2964 xfs_alloc_read_agf(
2965 struct xfs_mount *mp, /* mount point structure */
2966 struct xfs_trans *tp, /* transaction pointer */
2967 xfs_agnumber_t agno, /* allocation group number */
2968 int flags, /* XFS_ALLOC_FLAG_... */
2969 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2970 {
2971 struct xfs_agf *agf; /* ag freelist header */
2972 struct xfs_perag *pag; /* per allocation group data */
2973 int error;
2974
2975 trace_xfs_alloc_read_agf(mp, agno);
2976
2977 ASSERT(agno != NULLAGNUMBER);
2978 error = xfs_read_agf(mp, tp, agno,
2979 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2980 bpp);
2981 if (error)
2982 return error;
2983 if (!*bpp)
2984 return 0;
2985 ASSERT(!(*bpp)->b_error);
2986
2987 agf = XFS_BUF_TO_AGF(*bpp);
2988 pag = xfs_perag_get(mp, agno);
2989 if (!pag->pagf_init) {
2990 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2991 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2992 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2993 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2994 pag->pagf_levels[XFS_BTNUM_BNOi] =
2995 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2996 pag->pagf_levels[XFS_BTNUM_CNTi] =
2997 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2998 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2999 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3000 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3001 pag->pagf_init = 1;
3002 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
3003 }
3004 #ifdef DEBUG
3005 else if (!XFS_FORCED_SHUTDOWN(mp)) {
3006 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3007 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3008 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3009 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3010 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3011 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3012 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3013 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3014 }
3015 #endif
3016 xfs_perag_put(pag);
3017 return 0;
3018 }
3019
3020 /*
3021 * Allocate an extent (variable-size).
3022 * Depending on the allocation type, we either look in a single allocation
3023 * group or loop over the allocation groups to find the result.
3024 */
3025 int /* error */
3026 xfs_alloc_vextent(
3027 struct xfs_alloc_arg *args) /* allocation argument structure */
3028 {
3029 xfs_agblock_t agsize; /* allocation group size */
3030 int error;
3031 int flags; /* XFS_ALLOC_FLAG_... locking flags */
3032 struct xfs_mount *mp; /* mount structure pointer */
3033 xfs_agnumber_t sagno; /* starting allocation group number */
3034 xfs_alloctype_t type; /* input allocation type */
3035 int bump_rotor = 0;
3036 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
3037
3038 mp = args->mp;
3039 type = args->otype = args->type;
3040 args->agbno = NULLAGBLOCK;
3041 /*
3042 * Just fix this up, for the case where the last a.g. is shorter
3043 * (or there's only one a.g.) and the caller couldn't easily figure
3044 * that out (xfs_bmap_alloc).
3045 */
3046 agsize = mp->m_sb.sb_agblocks;
3047 if (args->maxlen > agsize)
3048 args->maxlen = agsize;
3049 if (args->alignment == 0)
3050 args->alignment = 1;
3051 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
3052 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
3053 ASSERT(args->minlen <= args->maxlen);
3054 ASSERT(args->minlen <= agsize);
3055 ASSERT(args->mod < args->prod);
3056 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
3057 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
3058 args->minlen > args->maxlen || args->minlen > agsize ||
3059 args->mod >= args->prod) {
3060 args->fsbno = NULLFSBLOCK;
3061 trace_xfs_alloc_vextent_badargs(args);
3062 return 0;
3063 }
3064
3065 switch (type) {
3066 case XFS_ALLOCTYPE_THIS_AG:
3067 case XFS_ALLOCTYPE_NEAR_BNO:
3068 case XFS_ALLOCTYPE_THIS_BNO:
3069 /*
3070 * These three force us into a single a.g.
3071 */
3072 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3073 args->pag = xfs_perag_get(mp, args->agno);
3074 error = xfs_alloc_fix_freelist(args, 0);
3075 if (error) {
3076 trace_xfs_alloc_vextent_nofix(args);
3077 goto error0;
3078 }
3079 if (!args->agbp) {
3080 trace_xfs_alloc_vextent_noagbp(args);
3081 break;
3082 }
3083 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3084 if ((error = xfs_alloc_ag_vextent(args)))
3085 goto error0;
3086 break;
3087 case XFS_ALLOCTYPE_START_BNO:
3088 /*
3089 * Try near allocation first, then anywhere-in-ag after
3090 * the first a.g. fails.
3091 */
3092 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3093 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
3094 args->fsbno = XFS_AGB_TO_FSB(mp,
3095 ((mp->m_agfrotor / rotorstep) %
3096 mp->m_sb.sb_agcount), 0);
3097 bump_rotor = 1;
3098 }
3099 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3100 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3101 /* FALLTHROUGH */
3102 case XFS_ALLOCTYPE_FIRST_AG:
3103 /*
3104 * Rotate through the allocation groups looking for a winner.
3105 */
3106 if (type == XFS_ALLOCTYPE_FIRST_AG) {
3107 /*
3108 * Start with allocation group given by bno.
3109 */
3110 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3111 args->type = XFS_ALLOCTYPE_THIS_AG;
3112 sagno = 0;
3113 flags = 0;
3114 } else {
3115 /*
3116 * Start with the given allocation group.
3117 */
3118 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3119 flags = XFS_ALLOC_FLAG_TRYLOCK;
3120 }
3121 /*
3122 * Loop over allocation groups twice; first time with
3123 * trylock set, second time without.
3124 */
3125 for (;;) {
3126 args->pag = xfs_perag_get(mp, args->agno);
3127 error = xfs_alloc_fix_freelist(args, flags);
3128 if (error) {
3129 trace_xfs_alloc_vextent_nofix(args);
3130 goto error0;
3131 }
3132 /*
3133 * If we get a buffer back then the allocation will fly.
3134 */
3135 if (args->agbp) {
3136 if ((error = xfs_alloc_ag_vextent(args)))
3137 goto error0;
3138 break;
3139 }
3140
3141 trace_xfs_alloc_vextent_loopfailed(args);
3142
3143 /*
3144 * Didn't work, figure out the next iteration.
3145 */
3146 if (args->agno == sagno &&
3147 type == XFS_ALLOCTYPE_START_BNO)
3148 args->type = XFS_ALLOCTYPE_THIS_AG;
3149 /*
3150 * For the first allocation, we can try any AG to get
3151 * space. However, if we already have allocated a
3152 * block, we don't want to try AGs whose number is below
3153 * sagno. Otherwise, we may end up with out-of-order
3154 * locking of AGF, which might cause deadlock.
3155 */
3156 if (++(args->agno) == mp->m_sb.sb_agcount) {
3157 if (args->tp->t_firstblock != NULLFSBLOCK)
3158 args->agno = sagno;
3159 else
3160 args->agno = 0;
3161 }
3162 /*
3163 * Reached the starting a.g., must either be done
3164 * or switch to non-trylock mode.
3165 */
3166 if (args->agno == sagno) {
3167 if (flags == 0) {
3168 args->agbno = NULLAGBLOCK;
3169 trace_xfs_alloc_vextent_allfailed(args);
3170 break;
3171 }
3172
3173 flags = 0;
3174 if (type == XFS_ALLOCTYPE_START_BNO) {
3175 args->agbno = XFS_FSB_TO_AGBNO(mp,
3176 args->fsbno);
3177 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3178 }
3179 }
3180 xfs_perag_put(args->pag);
3181 }
3182 if (bump_rotor) {
3183 if (args->agno == sagno)
3184 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3185 (mp->m_sb.sb_agcount * rotorstep);
3186 else
3187 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3188 (mp->m_sb.sb_agcount * rotorstep);
3189 }
3190 break;
3191 default:
3192 ASSERT(0);
3193 /* NOTREACHED */
3194 }
3195 if (args->agbno == NULLAGBLOCK)
3196 args->fsbno = NULLFSBLOCK;
3197 else {
3198 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3199 #ifdef DEBUG
3200 ASSERT(args->len >= args->minlen);
3201 ASSERT(args->len <= args->maxlen);
3202 ASSERT(args->agbno % args->alignment == 0);
3203 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
3204 args->len);
3205 #endif
3206
3207 }
3208 xfs_perag_put(args->pag);
3209 return 0;
3210 error0:
3211 xfs_perag_put(args->pag);
3212 return error;
3213 }
3214
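/*
 * Illustrative sketch (not part of the original source): the two-pass
 * AG rotation in xfs_alloc_vextent() above, simplified over plain
 * integers. try_ag() is a hypothetical stand-in for fixing the
 * freelist and attempting the allocation in one AG, and 'trylock'
 * models XFS_ALLOC_FLAG_TRYLOCK. The real loop additionally refuses
 * to wrap below sagno once a block has been allocated in this
 * transaction, to preserve AGF lock ordering.
 */
static int
example_rotate_ags(
	unsigned int	sagno,		/* starting AG */
	unsigned int	agcount,
	int		(*try_ag)(unsigned int agno, int trylock,
				  int *success, void *priv),
	int		*success,
	void		*priv)
{
	unsigned int	agno = sagno;
	int		trylock = 1;
	int		error;

	for (;;) {
		error = try_ag(agno, trylock, success, priv);
		if (error || *success)
			return error;
		if (++agno == agcount)
			agno = 0;	/* wrap to the first AG */
		if (agno == sagno) {
			if (!trylock)
				return 0;	/* both passes failed */
			trylock = 0;	/* second pass blocks on locks */
		}
	}
}
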
3215 /* Ensure that the freelist is at full capacity. */
3216 int
3217 xfs_free_extent_fix_freelist(
3218 struct xfs_trans *tp,
3219 xfs_agnumber_t agno,
3220 struct xfs_buf **agbp)
3221 {
3222 struct xfs_alloc_arg args;
3223 int error;
3224
3225 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3226 args.tp = tp;
3227 args.mp = tp->t_mountp;
3228 args.agno = agno;
3229
3230 /*
3231 * validate that the block number is legal - this enables us to detect
3232 * and handle a silent filesystem corruption rather than crashing.
3233 */
3234 if (args.agno >= args.mp->m_sb.sb_agcount)
3235 return -EFSCORRUPTED;
3236
3237 args.pag = xfs_perag_get(args.mp, args.agno);
3238 ASSERT(args.pag);
3239
3240 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3241 if (error)
3242 goto out;
3243
3244 *agbp = args.agbp;
3245 out:
3246 xfs_perag_put(args.pag);
3247 return error;
3248 }
3249
3250 /*
3251 * Free an extent.
3252 * Just break up the extent address and hand off to xfs_free_ag_extent
3253 * after fixing up the freelist.
3254 */
3255 int
3256 __xfs_free_extent(
3257 struct xfs_trans *tp,
3258 xfs_fsblock_t bno,
3259 xfs_extlen_t len,
3260 const struct xfs_owner_info *oinfo,
3261 enum xfs_ag_resv_type type,
3262 bool skip_discard)
3263 {
3264 struct xfs_mount *mp = tp->t_mountp;
3265 struct xfs_buf *agbp;
3266 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
3267 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
3268 int error;
3269 unsigned int busy_flags = 0;
3270
3271 ASSERT(len != 0);
3272 ASSERT(type != XFS_AG_RESV_AGFL);
3273
3274 if (XFS_TEST_ERROR(false, mp,
3275 XFS_ERRTAG_FREE_EXTENT))
3276 return -EIO;
3277
3278 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
3279 if (error)
3280 return error;
3281
3282 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3283 error = -EFSCORRUPTED;
3284 goto err;
3285 }
3286
3287 /* validate the extent size is legal now we have the agf locked */
3288 if (XFS_IS_CORRUPT(mp,
3289 agbno + len >
3290 be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length))) {
3291 error = -EFSCORRUPTED;
3292 goto err;
3293 }
3294
3295 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3296 if (error)
3297 goto err;
3298
3299 if (skip_discard)
3300 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3301 xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
3302 return 0;
3303
3304 err:
3305 xfs_trans_brelse(tp, agbp);
3306 return error;
3307 }
3308
3309 struct xfs_alloc_query_range_info {
3310 xfs_alloc_query_range_fn fn;
3311 void *priv;
3312 };
3313
3314 /* Format btree record and pass to our callback. */
3315 STATIC int
3316 xfs_alloc_query_range_helper(
3317 struct xfs_btree_cur *cur,
3318 union xfs_btree_rec *rec,
3319 void *priv)
3320 {
3321 struct xfs_alloc_query_range_info *query = priv;
3322 struct xfs_alloc_rec_incore irec;
3323
3324 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3325 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3326 return query->fn(cur, &irec, query->priv);
3327 }
3328
3329 /* Find all free space within a given range of blocks. */
3330 int
3331 xfs_alloc_query_range(
3332 struct xfs_btree_cur *cur,
3333 struct xfs_alloc_rec_incore *low_rec,
3334 struct xfs_alloc_rec_incore *high_rec,
3335 xfs_alloc_query_range_fn fn,
3336 void *priv)
3337 {
3338 union xfs_btree_irec low_brec;
3339 union xfs_btree_irec high_brec;
3340 struct xfs_alloc_query_range_info query;
3341
3342 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3343 low_brec.a = *low_rec;
3344 high_brec.a = *high_rec;
3345 query.priv = priv;
3346 query.fn = fn;
3347 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3348 xfs_alloc_query_range_helper, &query);
3349 }
3350
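/*
 * Usage sketch (illustrative, not part of the original source):
 * counting the free extents in a block range with
 * xfs_alloc_query_range(). The cursor must be a by-block
 * (XFS_BTNUM_BNO) cursor, per the ASSERT above; obtaining it is left
 * out here, and the example_* names are hypothetical.
 */
struct example_count {
	unsigned long long	extents;
	unsigned long long	blocks;
};

static int
example_count_helper(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	struct example_count		*ec = priv;

	ec->extents++;
	ec->blocks += rec->ar_blockcount;
	return 0;			/* 0 keeps the query going */
}

static int
example_count_range(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct example_count	*ec)
{
	struct xfs_alloc_rec_incore	low;
	struct xfs_alloc_rec_incore	high;

	memset(&low, 0, sizeof(low));
	low.ar_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.ar_startblock = bno + len - 1;

	return xfs_alloc_query_range(cur, &low, &high,
			example_count_helper, ec);
}
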
3351 /* Find all free space records. */
3352 int
3353 xfs_alloc_query_all(
3354 struct xfs_btree_cur *cur,
3355 xfs_alloc_query_range_fn fn,
3356 void *priv)
3357 {
3358 struct xfs_alloc_query_range_info query;
3359
3360 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3361 query.priv = priv;
3362 query.fn = fn;
3363 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3364 }
3365
3366 /* Is there a record covering a given extent? */
3367 int
3368 xfs_alloc_has_record(
3369 struct xfs_btree_cur *cur,
3370 xfs_agblock_t bno,
3371 xfs_extlen_t len,
3372 bool *exists)
3373 {
3374 union xfs_btree_irec low;
3375 union xfs_btree_irec high;
3376
3377 memset(&low, 0, sizeof(low));
3378 low.a.ar_startblock = bno;
3379 memset(&high, 0xFF, sizeof(high));
3380 high.a.ar_startblock = bno + len - 1;
3381
3382 return xfs_btree_has_record(cur, &low, &high, exists);
3383 }
3384
3385 /*
3386 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
3387 * error code or XFS_ITER_*.
3388 */
3389 int
3390 xfs_agfl_walk(
3391 struct xfs_mount *mp,
3392 struct xfs_agf *agf,
3393 struct xfs_buf *agflbp,
3394 xfs_agfl_walk_fn walk_fn,
3395 void *priv)
3396 {
3397 __be32 *agfl_bno;
3398 unsigned int i;
3399 int error;
3400
3401 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
3402 i = be32_to_cpu(agf->agf_flfirst);
3403
3404 /* Nothing to walk in an empty AGFL. */
3405 if (agf->agf_flcount == cpu_to_be32(0))
3406 return 0;
3407
3408 /* Otherwise, walk from first to last, wrapping as needed. */
3409 for (;;) {
3410 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3411 if (error)
3412 return error;
3413 if (i == be32_to_cpu(agf->agf_fllast))
3414 break;
3415 if (++i == xfs_agfl_size(mp))
3416 i = 0;
3417 }
3418
3419 return 0;
3420 }
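
/*
 * Usage sketch (illustrative, not part of the original source):
 * counting the active AGFL slots with xfs_agfl_walk(). The AGF and
 * AGFL buffers are assumed to have been read already, e.g. with
 * xfs_alloc_read_agf() and xfs_alloc_read_agfl(); that setup and its
 * error handling are omitted, and the example_* names are hypothetical.
 */
static int
example_count_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	unsigned int		*count = priv;

	(*count)++;		/* one callback per active AGFL slot */
	return 0;		/* 0 keeps the walk going */
}

static int
example_count_agfl(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agflbp,
	unsigned int		*count)
{
	*count = 0;
	return xfs_agfl_walk(mp, agf, agflbp, example_count_agfl_block,
			count);
}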