1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "libxfs_priv.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_btree.h"
16 #include "xfs_rmap.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_errortag.h"
20 #include "xfs_trace.h"
21 #include "xfs_trans.h"
22 #include "xfs_ag.h"
23 #include "xfs_ag_resv.h"
24 #include "xfs_bmap.h"
25
26 struct kmem_cache *xfs_extfree_item_cache;
27
28 struct workqueue_struct *xfs_alloc_wq;
29
30 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
31
32 #define XFSA_FIXUP_BNO_OK 1
33 #define XFSA_FIXUP_CNT_OK 2
34
35 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
36 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
37 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
38
39 /*
40 * Size of the AGFL. For CRC-enabled filesystems we steal a few slots at
41 * the beginning of the block for a proper header with the location information
42 * and CRC.
43 */
44 unsigned int
45 xfs_agfl_size(
46 struct xfs_mount *mp)
47 {
48 unsigned int size = mp->m_sb.sb_sectsize;
49
50 if (xfs_has_crc(mp))
51 size -= sizeof(struct xfs_agfl);
52
53 return size / sizeof(xfs_agblock_t);
54 }
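/*
 * Worked example (for illustration, assuming a 512 byte sector size): on a
 * V5 (CRC-enabled) filesystem the packed struct xfs_agfl header takes 36
 * bytes, leaving (512 - 36) / sizeof(xfs_agblock_t) == 119 AGFL slots; a V4
 * filesystem has no header and gets 512 / 4 == 128 slots.
 */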
55
56 unsigned int
57 xfs_refc_block(
58 struct xfs_mount *mp)
59 {
60 if (xfs_has_rmapbt(mp))
61 return XFS_RMAP_BLOCK(mp) + 1;
62 if (xfs_has_finobt(mp))
63 return XFS_FIBT_BLOCK(mp) + 1;
64 return XFS_IBT_BLOCK(mp) + 1;
65 }
66
67 xfs_extlen_t
68 xfs_prealloc_blocks(
69 struct xfs_mount *mp)
70 {
71 if (xfs_has_reflink(mp))
72 return xfs_refc_block(mp) + 1;
73 if (xfs_has_rmapbt(mp))
74 return XFS_RMAP_BLOCK(mp) + 1;
75 if (xfs_has_finobt(mp))
76 return XFS_FIBT_BLOCK(mp) + 1;
77 return XFS_IBT_BLOCK(mp) + 1;
78 }
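/*
 * In other words, xfs_prealloc_blocks() returns the first AG block past the
 * last statically-placed btree root, i.e. the first block available for
 * general allocation in a freshly initialised AG.
 */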
79
80 /*
81 * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
82 * guarantee that we can refill the AGFL prior to allocating space in a nearly
83 * full AG. Although the space described by the free space btrees, the
84 * blocks used by the freesp btrees themselves, and the blocks owned by the
85 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
86 * free space in the AG drop so low that the free space btrees cannot refill an
87 * empty AGFL up to the minimum level. Rather than grind through empty AGs
88 * until the fs goes down, we subtract this many AG blocks from the incore
89 * fdblocks to ensure user allocation does not overcommit the space the
90 * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
91 * withhold space from xfs_mod_fdblocks, so we do not account for that here.
92 */
93 #define XFS_ALLOCBT_AGFL_RESERVE 4
94
95 /*
96 * Compute the number of blocks that we set aside to guarantee the ability to
97 * refill the AGFL and handle a full bmap btree split.
98 *
99 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
100 * AGF buffer (PV 947395), we place constraints on the relationship among
101 * actual allocations for data blocks, freelist blocks, and potential file data
102 * bmap btree blocks. However, these restrictions may result in no actual space
103 * allocated for a delayed extent; for example, a data block in a certain AG is
104 * allocated but no additional block is available for the bmap btree block
105 * needed when the file's bmap btree splits. The result of this may
106 * lead to an infinite loop when the file gets flushed to disk and all delayed
107 * extents need to be actually allocated. To get around this, we explicitly set
108 * aside a few blocks which will not be reserved in delayed allocation.
109 *
110 * For each AG, we need to reserve enough blocks to replenish a totally empty
111 * AGFL and 4 more to handle a potential split of the file's bmap btree.
112 */
113 unsigned int
114 xfs_alloc_set_aside(
115 struct xfs_mount *mp)
116 {
117 return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
118 }
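/*
 * For illustration: a filesystem with 16 AGs sets aside 16 * (4 + 4) == 128
 * blocks from the incore free space counter, regardless of AG size.
 */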
119
120 /*
121 * When deciding how much space to allocate out of an AG, we limit the
122 * allocation maximum size to the size of the AG. However, we cannot use all the
123 * blocks in the AG - some are permanently used by metadata. These
124 * blocks are generally:
125 * - the AG superblock, AGF, AGI and AGFL
126 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
127 * the free inode btree (finobt) root block.
128 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
129 * - the rmapbt and refcountbt root blocks, if enabled
130 *
131 * The AG headers are sector sized, so the amount of space they take up is
132 * dependent on filesystem geometry. The others are all single blocks.
133 */
134 unsigned int
135 xfs_alloc_ag_max_usable(
136 struct xfs_mount *mp)
137 {
138 unsigned int blocks;
139
140 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
141 blocks += XFS_ALLOCBT_AGFL_RESERVE;
142 blocks += 3; /* AGF, AGI btree root blocks */
143 if (xfs_has_finobt(mp))
144 blocks++; /* finobt root block */
145 if (xfs_has_rmapbt(mp))
146 blocks++; /* rmap root block */
147 if (xfs_has_reflink(mp))
148 blocks++; /* refcount root block */
149
150 return mp->m_sb.sb_agblocks - blocks;
151 }
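/*
 * Worked example (hypothetical geometry): with 4096 byte blocks, 512 byte
 * sectors, and finobt, rmapbt and reflink all enabled, the four sector-sized
 * AG headers round up to one block, so we withhold 1 + 4 + 3 + 1 + 1 + 1 ==
 * 11 blocks from sb_agblocks.
 */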
152
153 /*
154 * Lookup the record equal to [bno, len] in the btree given by cur.
155 */
156 STATIC int /* error */
157 xfs_alloc_lookup_eq(
158 struct xfs_btree_cur *cur, /* btree cursor */
159 xfs_agblock_t bno, /* starting block of extent */
160 xfs_extlen_t len, /* length of extent */
161 int *stat) /* success/failure */
162 {
163 int error;
164
165 cur->bc_rec.a.ar_startblock = bno;
166 cur->bc_rec.a.ar_blockcount = len;
167 error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
168 cur->bc_ag.abt.active = (*stat == 1);
169 return error;
170 }
171
172 /*
173 * Lookup the first record greater than or equal to [bno, len]
174 * in the btree given by cur.
175 */
176 int /* error */
177 xfs_alloc_lookup_ge(
178 struct xfs_btree_cur *cur, /* btree cursor */
179 xfs_agblock_t bno, /* starting block of extent */
180 xfs_extlen_t len, /* length of extent */
181 int *stat) /* success/failure */
182 {
183 int error;
184
185 cur->bc_rec.a.ar_startblock = bno;
186 cur->bc_rec.a.ar_blockcount = len;
187 error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
188 cur->bc_ag.abt.active = (*stat == 1);
189 return error;
190 }
191
192 /*
193 * Lookup the first record less than or equal to [bno, len]
194 * in the btree given by cur.
195 */
196 int /* error */
197 xfs_alloc_lookup_le(
198 struct xfs_btree_cur *cur, /* btree cursor */
199 xfs_agblock_t bno, /* starting block of extent */
200 xfs_extlen_t len, /* length of extent */
201 int *stat) /* success/failure */
202 {
203 int error;
204 cur->bc_rec.a.ar_startblock = bno;
205 cur->bc_rec.a.ar_blockcount = len;
206 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
207 cur->bc_ag.abt.active = (*stat == 1);
208 return error;
209 }
210
211 static inline bool
212 xfs_alloc_cur_active(
213 struct xfs_btree_cur *cur)
214 {
215 return cur && cur->bc_ag.abt.active;
216 }
217
218 /*
219 * Update the record referred to by cur to the value given
220 * by [bno, len].
221 * This either works (return 0) or gets an EFSCORRUPTED error.
222 */
223 STATIC int /* error */
224 xfs_alloc_update(
225 struct xfs_btree_cur *cur, /* btree cursor */
226 xfs_agblock_t bno, /* starting block of extent */
227 xfs_extlen_t len) /* length of extent */
228 {
229 union xfs_btree_rec rec;
230
231 rec.alloc.ar_startblock = cpu_to_be32(bno);
232 rec.alloc.ar_blockcount = cpu_to_be32(len);
233 return xfs_btree_update(cur, &rec);
234 }
235
236 /*
237 * Get the data from the pointed-to record.
238 */
239 int /* error */
240 xfs_alloc_get_rec(
241 struct xfs_btree_cur *cur, /* btree cursor */
242 xfs_agblock_t *bno, /* output: starting block of extent */
243 xfs_extlen_t *len, /* output: length of extent */
244 int *stat) /* output: success/failure */
245 {
246 struct xfs_mount *mp = cur->bc_mp;
247 xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
248 union xfs_btree_rec *rec;
249 int error;
250
251 error = xfs_btree_get_rec(cur, &rec, stat);
252 if (error || !(*stat))
253 return error;
254
255 *bno = be32_to_cpu(rec->alloc.ar_startblock);
256 *len = be32_to_cpu(rec->alloc.ar_blockcount);
257
258 if (*len == 0)
259 goto out_bad_rec;
260
261 /* check for valid extent range, including overflow */
262 if (!xfs_verify_agbno(mp, agno, *bno))
263 goto out_bad_rec;
264 if (*bno > *bno + *len)
265 goto out_bad_rec;
266 if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
267 goto out_bad_rec;
268
269 return 0;
270
271 out_bad_rec:
272 xfs_warn(mp,
273 "%s Freespace BTree record corruption in AG %d detected!",
274 cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
275 xfs_warn(mp,
276 "start block 0x%x block count 0x%x", *bno, *len);
277 return -EFSCORRUPTED;
278 }
279
280 /*
281 * Compute aligned version of the found extent.
282 * Takes alignment and min length into account.
283 */
284 STATIC bool
285 xfs_alloc_compute_aligned(
286 xfs_alloc_arg_t *args, /* allocation argument structure */
287 xfs_agblock_t foundbno, /* starting block in found extent */
288 xfs_extlen_t foundlen, /* length in found extent */
289 xfs_agblock_t *resbno, /* result block number */
290 xfs_extlen_t *reslen, /* result length */
291 unsigned *busy_gen)
292 {
293 xfs_agblock_t bno = foundbno;
294 xfs_extlen_t len = foundlen;
295 xfs_extlen_t diff;
296 bool busy;
297
298 /* Trim busy sections out of found extent */
299 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
300
301 /*
302 * If we have a largish extent that happens to start before min_agbno,
303 * see if we can shift it into range...
304 */
305 if (bno < args->min_agbno && bno + len > args->min_agbno) {
306 diff = args->min_agbno - bno;
307 if (len > diff) {
308 bno += diff;
309 len -= diff;
310 }
311 }
312
313 if (args->alignment > 1 && len >= args->minlen) {
314 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
315
316 diff = aligned_bno - bno;
317
318 *resbno = aligned_bno;
319 *reslen = diff >= len ? 0 : len - diff;
320 } else {
321 *resbno = bno;
322 *reslen = len;
323 }
324
325 return busy;
326 }
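/*
 * Example (illustrative values, assuming len >= minlen): with
 * args->alignment == 4 and a found extent of [bno 10, len 20] after busy
 * trimming, aligned_bno is 12 and diff is 2, so the result is [12, 18]; two
 * blocks are given up to meet the alignment.
 */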
327
328 /*
329 * Compute best start block and diff for "near" allocations.
330 * freelen >= wantlen already checked by caller.
331 */
332 STATIC xfs_extlen_t /* difference value (absolute) */
333 xfs_alloc_compute_diff(
334 xfs_agblock_t wantbno, /* target starting block */
335 xfs_extlen_t wantlen, /* target length */
336 xfs_extlen_t alignment, /* target alignment */
337 int datatype, /* are we allocating data? */
338 xfs_agblock_t freebno, /* freespace's starting block */
339 xfs_extlen_t freelen, /* freespace's length */
340 xfs_agblock_t *newbnop) /* result: best start block from free */
341 {
342 xfs_agblock_t freeend; /* end of freespace extent */
343 xfs_agblock_t newbno1; /* return block number */
344 xfs_agblock_t newbno2; /* other new block number */
345 xfs_extlen_t newlen1=0; /* length with newbno1 */
346 xfs_extlen_t newlen2=0; /* length with newbno2 */
347 xfs_agblock_t wantend; /* end of target extent */
348 bool userdata = datatype & XFS_ALLOC_USERDATA;
349
350 ASSERT(freelen >= wantlen);
351 freeend = freebno + freelen;
352 wantend = wantbno + wantlen;
353 /*
354 * We want to allocate from the start of a free extent if it is past
355 * the desired block or if we are allocating user data and the free
356 * extent is before desired block. The second case is there to allow
357 * for contiguous allocation from the remaining free space if the file
358 * grows in the short term.
359 */
360 if (freebno >= wantbno || (userdata && freeend < wantend)) {
361 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
362 newbno1 = NULLAGBLOCK;
363 } else if (freeend >= wantend && alignment > 1) {
364 newbno1 = roundup(wantbno, alignment);
365 newbno2 = newbno1 - alignment;
366 if (newbno1 >= freeend)
367 newbno1 = NULLAGBLOCK;
368 else
369 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
370 if (newbno2 < freebno)
371 newbno2 = NULLAGBLOCK;
372 else
373 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
374 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
375 if (newlen1 < newlen2 ||
376 (newlen1 == newlen2 &&
377 XFS_ABSDIFF(newbno1, wantbno) >
378 XFS_ABSDIFF(newbno2, wantbno)))
379 newbno1 = newbno2;
380 } else if (newbno2 != NULLAGBLOCK)
381 newbno1 = newbno2;
382 } else if (freeend >= wantend) {
383 newbno1 = wantbno;
384 } else if (alignment > 1) {
385 newbno1 = roundup(freeend - wantlen, alignment);
386 if (newbno1 > freeend - wantlen &&
387 newbno1 - alignment >= freebno)
388 newbno1 -= alignment;
389 else if (newbno1 >= freeend)
390 newbno1 = NULLAGBLOCK;
391 } else
392 newbno1 = freeend - wantlen;
393 *newbnop = newbno1;
394 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
395 }
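/*
 * Example (illustrative values, alignment == 1, metadata allocation): for a
 * target of [wantbno 100, wantlen 10] and a free extent of [freebno 50,
 * freelen 200], freeend (250) covers wantend (110), so the best start is
 * wantbno itself and the returned diff is 0. If instead freebno were 120,
 * we would allocate from the start of the free extent and return diff 20.
 */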
396
397 /*
398 * Fix up the length, based on mod and prod.
399 * len should be k * prod + mod for some k.
400 * If len is too small it is returned unchanged.
401 * If len hits maxlen it is left alone.
402 */
403 STATIC void
404 xfs_alloc_fix_len(
405 xfs_alloc_arg_t *args) /* allocation argument structure */
406 {
407 xfs_extlen_t k;
408 xfs_extlen_t rlen;
409
410 ASSERT(args->mod < args->prod);
411 rlen = args->len;
412 ASSERT(rlen >= args->minlen);
413 ASSERT(rlen <= args->maxlen);
414 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
415 (args->mod == 0 && rlen < args->prod))
416 return;
417 k = rlen % args->prod;
418 if (k == args->mod)
419 return;
420 if (k > args->mod)
421 rlen = rlen - (k - args->mod);
422 else
423 rlen = rlen - args->prod + (args->mod - k);
424 /* casts to (int) catch length underflows */
425 if ((int)rlen < (int)args->minlen)
426 return;
427 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
428 ASSERT(rlen % args->prod == args->mod);
429 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
430 rlen + args->minleft);
431 args->len = rlen;
432 }
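/*
 * Example (illustrative values): with prod == 4, mod == 1 and args->len == 11
 * (and minlen <= 9 < 11 < maxlen), k == 11 % 4 == 3 > mod, so the length is
 * trimmed to 11 - (3 - 1) == 9, which satisfies 9 % 4 == 1 == mod.
 */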
433
434 /*
435 * Update the two btrees, logically removing from freespace the extent
436 * starting at rbno, rlen blocks. The extent is contained within the
437 * actual (current) free extent fbno for flen blocks.
438 * Flags are passed in indicating whether the cursors are set to the
439 * relevant records.
440 */
441 STATIC int /* error code */
442 xfs_alloc_fixup_trees(
443 struct xfs_btree_cur *cnt_cur, /* cursor for by-size btree */
444 struct xfs_btree_cur *bno_cur, /* cursor for by-block btree */
445 xfs_agblock_t fbno, /* starting block of free extent */
446 xfs_extlen_t flen, /* length of free extent */
447 xfs_agblock_t rbno, /* starting block of returned extent */
448 xfs_extlen_t rlen, /* length of returned extent */
449 int flags) /* flags, XFSA_FIXUP_... */
450 {
451 int error; /* error code */
452 int i; /* operation results */
453 xfs_agblock_t nfbno1; /* first new free startblock */
454 xfs_agblock_t nfbno2; /* second new free startblock */
455 xfs_extlen_t nflen1=0; /* first new free length */
456 xfs_extlen_t nflen2=0; /* second new free length */
457 struct xfs_mount *mp;
458
459 mp = cnt_cur->bc_mp;
460
461 /*
462 * Look up the record in the by-size tree if necessary.
463 */
464 if (flags & XFSA_FIXUP_CNT_OK) {
465 #ifdef DEBUG
466 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
467 return error;
468 if (XFS_IS_CORRUPT(mp,
469 i != 1 ||
470 nfbno1 != fbno ||
471 nflen1 != flen))
472 return -EFSCORRUPTED;
473 #endif
474 } else {
475 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
476 return error;
477 if (XFS_IS_CORRUPT(mp, i != 1))
478 return -EFSCORRUPTED;
479 }
480 /*
481 * Look up the record in the by-block tree if necessary.
482 */
483 if (flags & XFSA_FIXUP_BNO_OK) {
484 #ifdef DEBUG
485 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
486 return error;
487 if (XFS_IS_CORRUPT(mp,
488 i != 1 ||
489 nfbno1 != fbno ||
490 nflen1 != flen))
491 return -EFSCORRUPTED;
492 #endif
493 } else {
494 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
495 return error;
496 if (XFS_IS_CORRUPT(mp, i != 1))
497 return -EFSCORRUPTED;
498 }
499
500 #ifdef DEBUG
501 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
502 struct xfs_btree_block *bnoblock;
503 struct xfs_btree_block *cntblock;
504
505 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
506 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
507
508 if (XFS_IS_CORRUPT(mp,
509 bnoblock->bb_numrecs !=
510 cntblock->bb_numrecs))
511 return -EFSCORRUPTED;
512 }
513 #endif
514
515 /*
516 * Deal with all four cases: the allocated record is contained
517 * within the freespace record, so we can have new freespace
518 * at either (or both) end, or no freespace remaining.
519 */
520 if (rbno == fbno && rlen == flen)
521 nfbno1 = nfbno2 = NULLAGBLOCK;
522 else if (rbno == fbno) {
523 nfbno1 = rbno + rlen;
524 nflen1 = flen - rlen;
525 nfbno2 = NULLAGBLOCK;
526 } else if (rbno + rlen == fbno + flen) {
527 nfbno1 = fbno;
528 nflen1 = flen - rlen;
529 nfbno2 = NULLAGBLOCK;
530 } else {
531 nfbno1 = fbno;
532 nflen1 = rbno - fbno;
533 nfbno2 = rbno + rlen;
534 nflen2 = (fbno + flen) - nfbno2;
535 }
536 /*
537 * Delete the entry from the by-size btree.
538 */
539 if ((error = xfs_btree_delete(cnt_cur, &i)))
540 return error;
541 if (XFS_IS_CORRUPT(mp, i != 1))
542 return -EFSCORRUPTED;
543 /*
544 * Add new by-size btree entry(s).
545 */
546 if (nfbno1 != NULLAGBLOCK) {
547 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
548 return error;
549 if (XFS_IS_CORRUPT(mp, i != 0))
550 return -EFSCORRUPTED;
551 if ((error = xfs_btree_insert(cnt_cur, &i)))
552 return error;
553 if (XFS_IS_CORRUPT(mp, i != 1))
554 return -EFSCORRUPTED;
555 }
556 if (nfbno2 != NULLAGBLOCK) {
557 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
558 return error;
559 if (XFS_IS_CORRUPT(mp, i != 0))
560 return -EFSCORRUPTED;
561 if ((error = xfs_btree_insert(cnt_cur, &i)))
562 return error;
563 if (XFS_IS_CORRUPT(mp, i != 1))
564 return -EFSCORRUPTED;
565 }
566 /*
567 * Fix up the by-block btree entry(s).
568 */
569 if (nfbno1 == NULLAGBLOCK) {
570 /*
571 * No remaining freespace, just delete the by-block tree entry.
572 */
573 if ((error = xfs_btree_delete(bno_cur, &i)))
574 return error;
575 if (XFS_IS_CORRUPT(mp, i != 1))
576 return -EFSCORRUPTED;
577 } else {
578 /*
579 * Update the by-block entry to start later|be shorter.
580 */
581 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
582 return error;
583 }
584 if (nfbno2 != NULLAGBLOCK) {
585 /*
586 * 2 resulting free entries, need to add one.
587 */
588 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
589 return error;
590 if (XFS_IS_CORRUPT(mp, i != 0))
591 return -EFSCORRUPTED;
592 if ((error = xfs_btree_insert(bno_cur, &i)))
593 return error;
594 if (XFS_IS_CORRUPT(mp, i != 1))
595 return -EFSCORRUPTED;
596 }
597 return 0;
598 }
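/*
 * Example of the "allocation in the middle" case above (illustrative values):
 * allocating [rbno 105, rlen 5] out of the free extent [fbno 100, flen 20]
 * leaves two new free extents, [100, 5] and [110, 10]. The by-block record is
 * updated in place and a second record inserted, while the by-size tree has
 * the old record deleted and both new records inserted.
 */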
599
600 static xfs_failaddr_t
601 xfs_agfl_verify(
602 struct xfs_buf *bp)
603 {
604 struct xfs_mount *mp = bp->b_mount;
605 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
606 __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp);
607 int i;
608
609 /*
610 * There is no verification of non-crc AGFLs because mkfs does not
611 * initialise the AGFL to zero or NULL. Hence the only valid part of the
612 * AGFL is what the AGF says is active. We can't get to the AGF, so we
613 * can't verify just those entries are valid.
614 */
615 if (!xfs_has_crc(mp))
616 return NULL;
617
618 if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
619 return __this_address;
620 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
621 return __this_address;
622 /*
623 * during growfs operations, the perag is not fully initialised,
624 * so we can't use it for any useful checking. growfs ensures we can't
625 * use it by using uncached buffers that don't have the perag attached
626 * so we can detect and avoid this problem.
627 */
628 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
629 return __this_address;
630
631 for (i = 0; i < xfs_agfl_size(mp); i++) {
632 if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
633 be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
634 return __this_address;
635 }
636
637 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
638 return __this_address;
639 return NULL;
640 }
641
642 static void
643 xfs_agfl_read_verify(
644 struct xfs_buf *bp)
645 {
646 struct xfs_mount *mp = bp->b_mount;
647 xfs_failaddr_t fa;
648
649 /*
650 * There is no verification of non-crc AGFLs because mkfs does not
651 * initialise the AGFL to zero or NULL. Hence the only valid part of the
652 * AGFL is what the AGF says is active. We can't get to the AGF, so we
653 * can't verify just those entries are valid.
654 */
655 if (!xfs_has_crc(mp))
656 return;
657
658 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
659 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
660 else {
661 fa = xfs_agfl_verify(bp);
662 if (fa)
663 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
664 }
665 }
666
667 static void
668 xfs_agfl_write_verify(
669 struct xfs_buf *bp)
670 {
671 struct xfs_mount *mp = bp->b_mount;
672 struct xfs_buf_log_item *bip = bp->b_log_item;
673 xfs_failaddr_t fa;
674
675 /* no verification of non-crc AGFLs */
676 if (!xfs_has_crc(mp))
677 return;
678
679 fa = xfs_agfl_verify(bp);
680 if (fa) {
681 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
682 return;
683 }
684
685 if (bip)
686 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
687
688 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
689 }
690
691 const struct xfs_buf_ops xfs_agfl_buf_ops = {
692 .name = "xfs_agfl",
693 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
694 .verify_read = xfs_agfl_read_verify,
695 .verify_write = xfs_agfl_write_verify,
696 .verify_struct = xfs_agfl_verify,
697 };
698
699 /*
700 * Read in the allocation group free block array.
701 */
702 int /* error */
703 xfs_alloc_read_agfl(
704 xfs_mount_t *mp, /* mount point structure */
705 xfs_trans_t *tp, /* transaction pointer */
706 xfs_agnumber_t agno, /* allocation group number */
707 struct xfs_buf **bpp) /* buffer for the ag free block array */
708 {
709 struct xfs_buf *bp; /* return value */
710 int error;
711
712 ASSERT(agno != NULLAGNUMBER);
713 error = xfs_trans_read_buf(
714 mp, tp, mp->m_ddev_targp,
715 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
716 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
717 if (error)
718 return error;
719 xfs_buf_set_ref(bp, XFS_AGFL_REF);
720 *bpp = bp;
721 return 0;
722 }
723
724 STATIC int
725 xfs_alloc_update_counters(
726 struct xfs_trans *tp,
727 struct xfs_buf *agbp,
728 long len)
729 {
730 struct xfs_agf *agf = agbp->b_addr;
731
732 agbp->b_pag->pagf_freeblks += len;
733 be32_add_cpu(&agf->agf_freeblks, len);
734
735 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
736 be32_to_cpu(agf->agf_length))) {
737 xfs_buf_mark_corrupt(agbp);
738 return -EFSCORRUPTED;
739 }
740
741 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
742 return 0;
743 }
744
745 /*
746 * Block allocation algorithm and data structures.
747 */
748 struct xfs_alloc_cur {
749 struct xfs_btree_cur *cnt; /* btree cursors */
750 struct xfs_btree_cur *bnolt;
751 struct xfs_btree_cur *bnogt;
752 xfs_extlen_t cur_len;/* current search length */
753 xfs_agblock_t rec_bno;/* extent startblock */
754 xfs_extlen_t rec_len;/* extent length */
755 xfs_agblock_t bno; /* alloc bno */
756 xfs_extlen_t len; /* alloc len */
757 xfs_extlen_t diff; /* diff from search bno */
758 unsigned int busy_gen;/* busy state */
759 bool busy;
760 };
761
762 /*
763 * Set up cursors, etc. in the extent allocation cursor. This function can be
764 * called multiple times to reset an initialized structure without having to
765 * reallocate cursors.
766 */
767 static int
768 xfs_alloc_cur_setup(
769 struct xfs_alloc_arg *args,
770 struct xfs_alloc_cur *acur)
771 {
772 int error;
773 int i;
774
775 ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
776
777 acur->cur_len = args->maxlen;
778 acur->rec_bno = 0;
779 acur->rec_len = 0;
780 acur->bno = 0;
781 acur->len = 0;
782 acur->diff = -1;
783 acur->busy = false;
784 acur->busy_gen = 0;
785
786 /*
787 * Perform an initial cntbt lookup to check for availability of maxlen
788 * extents. If this fails, we'll return -ENOSPC to signal the caller to
789 * attempt a small allocation.
790 */
791 if (!acur->cnt)
792 acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
793 args->agbp, args->pag, XFS_BTNUM_CNT);
794 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
795 if (error)
796 return error;
797
798 /*
799 * Allocate the bnobt left and right search cursors.
800 */
801 if (!acur->bnolt)
802 acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
803 args->agbp, args->pag, XFS_BTNUM_BNO);
804 if (!acur->bnogt)
805 acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
806 args->agbp, args->pag, XFS_BTNUM_BNO);
807 return i == 1 ? 0 : -ENOSPC;
808 }
809
810 static void
811 xfs_alloc_cur_close(
812 struct xfs_alloc_cur *acur,
813 bool error)
814 {
815 int cur_error = XFS_BTREE_NOERROR;
816
817 if (error)
818 cur_error = XFS_BTREE_ERROR;
819
820 if (acur->cnt)
821 xfs_btree_del_cursor(acur->cnt, cur_error);
822 if (acur->bnolt)
823 xfs_btree_del_cursor(acur->bnolt, cur_error);
824 if (acur->bnogt)
825 xfs_btree_del_cursor(acur->bnogt, cur_error);
826 acur->cnt = acur->bnolt = acur->bnogt = NULL;
827 }
828
829 /*
830 * Check an extent for allocation and track the best available candidate in the
831 * allocation structure. The cursor is deactivated if it has entered an out of
832 * range state based on allocation arguments. Optionally return the extent
833 * geometry and allocation status if requested by the caller.
834 */
835 static int
836 xfs_alloc_cur_check(
837 struct xfs_alloc_arg *args,
838 struct xfs_alloc_cur *acur,
839 struct xfs_btree_cur *cur,
840 int *new)
841 {
842 int error, i;
843 xfs_agblock_t bno, bnoa, bnew;
844 xfs_extlen_t len, lena, diff = -1;
845 bool busy;
846 unsigned busy_gen = 0;
847 bool deactivate = false;
848 bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
849
850 *new = 0;
851
852 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
853 if (error)
854 return error;
855 if (XFS_IS_CORRUPT(args->mp, i != 1))
856 return -EFSCORRUPTED;
857
858 /*
859 * Check minlen and deactivate a cntbt cursor if out of acceptable size
860 * range (i.e., walking backwards looking for a minlen extent).
861 */
862 if (len < args->minlen) {
863 deactivate = !isbnobt;
864 goto out;
865 }
866
867 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
868 &busy_gen);
869 acur->busy |= busy;
870 if (busy)
871 acur->busy_gen = busy_gen;
872 /* deactivate a bnobt cursor outside of locality range */
873 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
874 deactivate = isbnobt;
875 goto out;
876 }
877 if (lena < args->minlen)
878 goto out;
879
880 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
881 xfs_alloc_fix_len(args);
882 ASSERT(args->len >= args->minlen);
883 if (args->len < acur->len)
884 goto out;
885
886 /*
887 * We have an aligned record that satisfies minlen and beats or matches
888 * the candidate extent size. Compare locality for near allocation mode.
889 */
890 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
891 diff = xfs_alloc_compute_diff(args->agbno, args->len,
892 args->alignment, args->datatype,
893 bnoa, lena, &bnew);
894 if (bnew == NULLAGBLOCK)
895 goto out;
896
897 /*
898 * Deactivate a bnobt cursor with worse locality than the current best.
899 */
900 if (diff > acur->diff) {
901 deactivate = isbnobt;
902 goto out;
903 }
904
905 ASSERT(args->len > acur->len ||
906 (args->len == acur->len && diff <= acur->diff));
907 acur->rec_bno = bno;
908 acur->rec_len = len;
909 acur->bno = bnew;
910 acur->len = args->len;
911 acur->diff = diff;
912 *new = 1;
913
914 /*
915 * We're done if we found a perfect allocation. This only deactivates
916 * the current cursor, but this is just an optimization to terminate a
917 * cntbt search that otherwise runs to the edge of the tree.
918 */
919 if (acur->diff == 0 && acur->len == args->maxlen)
920 deactivate = true;
921 out:
922 if (deactivate)
923 cur->bc_ag.abt.active = false;
924 trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
925 *new);
926 return 0;
927 }
928
929 /*
930 * Complete an allocation of a candidate extent. Remove the extent from both
931 * trees and update the args structure.
932 */
933 STATIC int
934 xfs_alloc_cur_finish(
935 struct xfs_alloc_arg *args,
936 struct xfs_alloc_cur *acur)
937 {
938 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
939 int error;
940
941 ASSERT(acur->cnt && acur->bnolt);
942 ASSERT(acur->bno >= acur->rec_bno);
943 ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
944 ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
945
946 error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
947 acur->rec_len, acur->bno, acur->len, 0);
948 if (error)
949 return error;
950
951 args->agbno = acur->bno;
952 args->len = acur->len;
953 args->wasfromfl = 0;
954
955 trace_xfs_alloc_cur(args);
956 return 0;
957 }
958
959 /*
960 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
961 * bno optimized lookup to search for extents with ideal size and locality.
962 */
963 STATIC int
964 xfs_alloc_cntbt_iter(
965 struct xfs_alloc_arg *args,
966 struct xfs_alloc_cur *acur)
967 {
968 struct xfs_btree_cur *cur = acur->cnt;
969 xfs_agblock_t bno;
970 xfs_extlen_t len, cur_len;
971 int error;
972 int i;
973
974 if (!xfs_alloc_cur_active(cur))
975 return 0;
976
977 /* locality optimized lookup */
978 cur_len = acur->cur_len;
979 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
980 if (error)
981 return error;
982 if (i == 0)
983 return 0;
984 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
985 if (error)
986 return error;
987
988 /* check the current record and update search length from it */
989 error = xfs_alloc_cur_check(args, acur, cur, &i);
990 if (error)
991 return error;
992 ASSERT(len >= acur->cur_len);
993 acur->cur_len = len;
994
995 /*
996 * We looked up the first record >= [agbno, len] above. The agbno is a
997 * secondary key and so the current record may lie just before or after
998 * agbno. If it is past agbno, check the previous record too so long as
999 * the length matches as it may be closer. Don't check a smaller record
1000 * because that could deactivate our cursor.
1001 */
1002 if (bno > args->agbno) {
1003 error = xfs_btree_decrement(cur, 0, &i);
1004 if (!error && i) {
1005 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1006 if (!error && i && len == acur->cur_len)
1007 error = xfs_alloc_cur_check(args, acur, cur,
1008 &i);
1009 }
1010 if (error)
1011 return error;
1012 }
1013
1014 /*
1015 * Increment the search key by one if we have not yet found an allocation
1016 * candidate, or if the extent we found was at least as large as the doubled
1017 * search key. Otherwise, double the search key to optimize the search.
1018 * Efficiency is more important here than absolute best locality.
1019 */
1020 cur_len <<= 1;
1021 if (!acur->len || acur->cur_len >= cur_len)
1022 acur->cur_len++;
1023 else
1024 acur->cur_len = cur_len;
1025
1026 return error;
1027 }
1028
1029 /*
1030 * Deal with the case where only small freespaces remain. Either return the
1031 * contents of the last freespace record, or allocate space from the freelist if
1032 * there is nothing in the tree.
1033 */
1034 STATIC int /* error */
1035 xfs_alloc_ag_vextent_small(
1036 struct xfs_alloc_arg *args, /* allocation argument structure */
1037 struct xfs_btree_cur *ccur, /* optional by-size cursor */
1038 xfs_agblock_t *fbnop, /* result block number */
1039 xfs_extlen_t *flenp, /* result length */
1040 int *stat) /* status: 0-freelist, 1-normal/none */
1041 {
1042 struct xfs_agf *agf = args->agbp->b_addr;
1043 int error = 0;
1044 xfs_agblock_t fbno = NULLAGBLOCK;
1045 xfs_extlen_t flen = 0;
1046 int i = 0;
1047
1048 /*
1049 * If a cntbt cursor is provided, try to allocate the largest record in
1050 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1051 * allocation. Make sure to respect minleft even when pulling from the
1052 * freelist.
1053 */
1054 if (ccur)
1055 error = xfs_btree_decrement(ccur, 0, &i);
1056 if (error)
1057 goto error;
1058 if (i) {
1059 error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1060 if (error)
1061 goto error;
1062 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1063 error = -EFSCORRUPTED;
1064 goto error;
1065 }
1066 goto out;
1067 }
1068
1069 if (args->minlen != 1 || args->alignment != 1 ||
1070 args->resv == XFS_AG_RESV_AGFL ||
1071 be32_to_cpu(agf->agf_flcount) <= args->minleft)
1072 goto out;
1073
1074 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1075 if (error)
1076 goto error;
1077 if (fbno == NULLAGBLOCK)
1078 goto out;
1079
1080 xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1081 (args->datatype & XFS_ALLOC_NOBUSY));
1082
1083 if (args->datatype & XFS_ALLOC_USERDATA) {
1084 struct xfs_buf *bp;
1085
1086 error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1087 XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1088 args->mp->m_bsize, 0, &bp);
1089 if (error)
1090 goto error;
1091 xfs_trans_binval(args->tp, bp);
1092 }
1093 *fbnop = args->agbno = fbno;
1094 *flenp = args->len = 1;
1095 if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1096 error = -EFSCORRUPTED;
1097 goto error;
1098 }
1099 args->wasfromfl = 1;
1100 trace_xfs_alloc_small_freelist(args);
1101
1102 /*
1103 * If we're feeding an AGFL block to something that doesn't live in the
1104 * free space, we need to clear out the OWN_AG rmap.
1105 */
1106 error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1107 &XFS_RMAP_OINFO_AG);
1108 if (error)
1109 goto error;
1110
1111 *stat = 0;
1112 return 0;
1113
1114 out:
1115 /*
1116 * Can't do the allocation, give up.
1117 */
1118 if (flen < args->minlen) {
1119 args->agbno = NULLAGBLOCK;
1120 trace_xfs_alloc_small_notenough(args);
1121 flen = 0;
1122 }
1123 *fbnop = fbno;
1124 *flenp = flen;
1125 *stat = 1;
1126 trace_xfs_alloc_small_done(args);
1127 return 0;
1128
1129 error:
1130 trace_xfs_alloc_small_error(args);
1131 return error;
1132 }
1133
1134 /*
1135 * Allocate a variable extent in the allocation group agno.
1136 * Type and bno are used to determine where in the allocation group the
1137 * extent will start.
1138 * Extent's length (returned in *len) will be between minlen and maxlen,
1139 * and of the form k * prod + mod unless there's nothing that large.
1140 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1141 */
1142 STATIC int /* error */
1143 xfs_alloc_ag_vextent(
1144 xfs_alloc_arg_t *args) /* argument structure for allocation */
1145 {
1146 int error=0;
1147
1148 ASSERT(args->minlen > 0);
1149 ASSERT(args->maxlen > 0);
1150 ASSERT(args->minlen <= args->maxlen);
1151 ASSERT(args->mod < args->prod);
1152 ASSERT(args->alignment > 0);
1153
1154 /*
1155 * Branch to correct routine based on the type.
1156 */
1157 args->wasfromfl = 0;
1158 switch (args->type) {
1159 case XFS_ALLOCTYPE_THIS_AG:
1160 error = xfs_alloc_ag_vextent_size(args);
1161 break;
1162 case XFS_ALLOCTYPE_NEAR_BNO:
1163 error = xfs_alloc_ag_vextent_near(args);
1164 break;
1165 case XFS_ALLOCTYPE_THIS_BNO:
1166 error = xfs_alloc_ag_vextent_exact(args);
1167 break;
1168 default:
1169 ASSERT(0);
1170 /* NOTREACHED */
1171 }
1172
1173 if (error || args->agbno == NULLAGBLOCK)
1174 return error;
1175
1176 ASSERT(args->len >= args->minlen);
1177 ASSERT(args->len <= args->maxlen);
1178 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
1179 ASSERT(args->agbno % args->alignment == 0);
1180
1181 /* if not file data, insert new block into the reverse map btree */
1182 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
1183 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
1184 args->agbno, args->len, &args->oinfo);
1185 if (error)
1186 return error;
1187 }
1188
1189 if (!args->wasfromfl) {
1190 error = xfs_alloc_update_counters(args->tp, args->agbp,
1191 -((long)(args->len)));
1192 if (error)
1193 return error;
1194
1195 ASSERT(!xfs_extent_busy_search(args->mp, args->pag,
1196 args->agbno, args->len));
1197 }
1198
1199 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
1200
1201 XFS_STATS_INC(args->mp, xs_allocx);
1202 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
1203 return error;
1204 }
1205
1206 /*
1207 * Allocate a variable extent at exactly agno/bno.
1208 * Extent's length (returned in *len) will be between minlen and maxlen,
1209 * and of the form k * prod + mod unless there's nothing that large.
1210 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1211 */
1212 STATIC int /* error */
1213 xfs_alloc_ag_vextent_exact(
1214 xfs_alloc_arg_t *args) /* allocation argument structure */
1215 {
1216 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1217 struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1218 struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1219 int error;
1220 xfs_agblock_t fbno; /* start block of found extent */
1221 xfs_extlen_t flen; /* length of found extent */
1222 xfs_agblock_t tbno; /* start block of busy extent */
1223 xfs_extlen_t tlen; /* length of busy extent */
1224 xfs_agblock_t tend; /* end block of busy extent */
1225 int i; /* success/failure of operation */
1226 unsigned busy_gen;
1227
1228 ASSERT(args->alignment == 1);
1229
1230 /*
1231 * Allocate/initialize a cursor for the by-number freespace btree.
1232 */
1233 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1234 args->pag, XFS_BTNUM_BNO);
1235
1236 /*
1237 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1238 * Look for the closest free block <= bno, it must contain bno
1239 * if any free block does.
1240 */
1241 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1242 if (error)
1243 goto error0;
1244 if (!i)
1245 goto not_found;
1246
1247 /*
1248 * Grab the freespace record.
1249 */
1250 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1251 if (error)
1252 goto error0;
1253 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1254 error = -EFSCORRUPTED;
1255 goto error0;
1256 }
1257 ASSERT(fbno <= args->agbno);
1258
1259 /*
1260 * Check for overlapping busy extents.
1261 */
1262 tbno = fbno;
1263 tlen = flen;
1264 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1265
1266 /*
1267 * Give up if the start of the extent is busy, or the freespace isn't
1268 * long enough for the minimum request.
1269 */
1270 if (tbno > args->agbno)
1271 goto not_found;
1272 if (tlen < args->minlen)
1273 goto not_found;
1274 tend = tbno + tlen;
1275 if (tend < args->agbno + args->minlen)
1276 goto not_found;
1277
1278 /*
1279 * End of extent will be smaller of the freespace end and the
1280 * maximal requested end.
1281 *
1282 * Fix the length according to mod and prod if given.
1283 */
1284 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1285 - args->agbno;
1286 xfs_alloc_fix_len(args);
1287 ASSERT(args->agbno + args->len <= tend);
1288
1289 /*
1290 * We are allocating agbno for args->len
1291 * Allocate/initialize a cursor for the by-size btree.
1292 */
1293 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1294 args->pag, XFS_BTNUM_CNT);
1295 ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
1296 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1297 args->len, XFSA_FIXUP_BNO_OK);
1298 if (error) {
1299 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1300 goto error0;
1301 }
1302
1303 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1304 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1305
1306 args->wasfromfl = 0;
1307 trace_xfs_alloc_exact_done(args);
1308 return 0;
1309
1310 not_found:
1311 /* Didn't find it, return null. */
1312 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1313 args->agbno = NULLAGBLOCK;
1314 trace_xfs_alloc_exact_notfound(args);
1315 return 0;
1316
1317 error0:
1318 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1319 trace_xfs_alloc_exact_error(args);
1320 return error;
1321 }
1322
1323 /*
1324 * Search a given number of btree records in a given direction. Check each
1325 * record against the good extent we've already found.
1326 */
1327 STATIC int
1328 xfs_alloc_walk_iter(
1329 struct xfs_alloc_arg *args,
1330 struct xfs_alloc_cur *acur,
1331 struct xfs_btree_cur *cur,
1332 bool increment,
1333 bool find_one, /* quit on first candidate */
1334 int count, /* rec count (-1 for infinite) */
1335 int *stat)
1336 {
1337 int error;
1338 int i;
1339
1340 *stat = 0;
1341
1342 /*
1343 * Search so long as the cursor is active or we find a better extent.
1344 * The cursor is deactivated if it extends beyond the range of the
1345 * current allocation candidate.
1346 */
1347 while (xfs_alloc_cur_active(cur) && count) {
1348 error = xfs_alloc_cur_check(args, acur, cur, &i);
1349 if (error)
1350 return error;
1351 if (i == 1) {
1352 *stat = 1;
1353 if (find_one)
1354 break;
1355 }
1356 if (!xfs_alloc_cur_active(cur))
1357 break;
1358
1359 if (increment)
1360 error = xfs_btree_increment(cur, 0, &i);
1361 else
1362 error = xfs_btree_decrement(cur, 0, &i);
1363 if (error)
1364 return error;
1365 if (i == 0)
1366 cur->bc_ag.abt.active = false;
1367
1368 if (count > 0)
1369 count--;
1370 }
1371
1372 return 0;
1373 }
1374
1375 /*
1376 * Search the by-bno and by-size btrees in parallel in search of an extent with
1377 * ideal locality based on the NEAR mode ->agbno locality hint.
1378 */
1379 STATIC int
1380 xfs_alloc_ag_vextent_locality(
1381 struct xfs_alloc_arg *args,
1382 struct xfs_alloc_cur *acur,
1383 int *stat)
1384 {
1385 struct xfs_btree_cur *fbcur = NULL;
1386 int error;
1387 int i;
1388 bool fbinc;
1389
1390 ASSERT(acur->len == 0);
1391 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
1392
1393 *stat = 0;
1394
1395 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1396 if (error)
1397 return error;
1398 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1399 if (error)
1400 return error;
1401 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1402 if (error)
1403 return error;
1404
1405 /*
1406 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1407 * right and lookup the closest extent to the locality hint for each
1408 * extent size key in the cntbt. The entire search terminates
1409 * immediately on a bnobt hit because that means we've found best case
1410 * locality. Otherwise the search continues until the cntbt cursor runs
1411 * off the end of the tree. If no allocation candidate is found at this
1412 * point, give up on locality, walk backwards from the end of the cntbt
1413 * and take the first available extent.
1414 *
1415 * The parallel tree searches balance each other out to provide fairly
1416 * consistent performance for various situations. The bnobt search can
1417 * have pathological behavior in the worst case scenario of larger
1418 * allocation requests and fragmented free space. On the other hand, the
1419 * bnobt is able to satisfy most smaller allocation requests much more
1420 * quickly than the cntbt. The cntbt search can sift through fragmented
1421 * free space and sets of free extents for larger allocation requests
1422 * more quickly than the bnobt. Since the locality hint is just a hint
1423 * and we don't want to scan the entire bnobt for perfect locality, the
1424 * cntbt search essentially bounds the bnobt search such that we can
1425 * find good enough locality at reasonable performance in most cases.
1426 */
1427 while (xfs_alloc_cur_active(acur->bnolt) ||
1428 xfs_alloc_cur_active(acur->bnogt) ||
1429 xfs_alloc_cur_active(acur->cnt)) {
1430
1431 trace_xfs_alloc_cur_lookup(args);
1432
1433 /*
1434 * Search the bnobt left and right. In the case of a hit, finish
1435 * the search in the opposite direction and we're done.
1436 */
1437 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1438 true, 1, &i);
1439 if (error)
1440 return error;
1441 if (i == 1) {
1442 trace_xfs_alloc_cur_left(args);
1443 fbcur = acur->bnogt;
1444 fbinc = true;
1445 break;
1446 }
1447 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1448 1, &i);
1449 if (error)
1450 return error;
1451 if (i == 1) {
1452 trace_xfs_alloc_cur_right(args);
1453 fbcur = acur->bnolt;
1454 fbinc = false;
1455 break;
1456 }
1457
1458 /*
1459 * Check the extent with best locality based on the current
1460 * extent size search key and keep track of the best candidate.
1461 */
1462 error = xfs_alloc_cntbt_iter(args, acur);
1463 if (error)
1464 return error;
1465 if (!xfs_alloc_cur_active(acur->cnt)) {
1466 trace_xfs_alloc_cur_lookup_done(args);
1467 break;
1468 }
1469 }
1470
1471 /*
1472 * If we failed to find anything due to busy extents, return empty
1473 * handed so the caller can flush and retry. If no busy extents were
1474 * found, walk backwards from the end of the cntbt as a last resort.
1475 */
1476 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1477 error = xfs_btree_decrement(acur->cnt, 0, &i);
1478 if (error)
1479 return error;
1480 if (i) {
1481 acur->cnt->bc_ag.abt.active = true;
1482 fbcur = acur->cnt;
1483 fbinc = false;
1484 }
1485 }
1486
1487 /*
1488 * Search in the opposite direction for a better entry in the case of
1489 * a bnobt hit or walk backwards from the end of the cntbt.
1490 */
1491 if (fbcur) {
1492 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1493 &i);
1494 if (error)
1495 return error;
1496 }
1497
1498 if (acur->len)
1499 *stat = 1;
1500
1501 return 0;
1502 }
1503
1504 /* Check the last block of the cnt btree for allocations. */
1505 static int
1506 xfs_alloc_ag_vextent_lastblock(
1507 struct xfs_alloc_arg *args,
1508 struct xfs_alloc_cur *acur,
1509 xfs_agblock_t *bno,
1510 xfs_extlen_t *len,
1511 bool *allocated)
1512 {
1513 int error;
1514 int i;
1515
1516 #ifdef DEBUG
1517 /* Randomly don't execute the first algorithm. */
1518 if (prandom_u32() & 1)
1519 return 0;
1520 #endif
1521
1522 /*
1523 * Start from the entry that lookup found, sequence through all larger
1524 * free blocks. If we're actually pointing at a record smaller than
1525 * maxlen, go to the start of this block, and skip all those smaller
1526 * than minlen.
1527 */
1528 if (*len || args->alignment > 1) {
1529 acur->cnt->bc_levels[0].ptr = 1;
1530 do {
1531 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1532 if (error)
1533 return error;
1534 if (XFS_IS_CORRUPT(args->mp, i != 1))
1535 return -EFSCORRUPTED;
1536 if (*len >= args->minlen)
1537 break;
1538 error = xfs_btree_increment(acur->cnt, 0, &i);
1539 if (error)
1540 return error;
1541 } while (i);
1542 ASSERT(*len >= args->minlen);
1543 if (!i)
1544 return 0;
1545 }
1546
1547 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1548 if (error)
1549 return error;
1550
1551 /*
1552 * It didn't work. We COULD be in a case where there's a good record
1553 * somewhere, so try again.
1554 */
1555 if (acur->len == 0)
1556 return 0;
1557
1558 trace_xfs_alloc_near_first(args);
1559 *allocated = true;
1560 return 0;
1561 }
1562
1563 /*
1564 * Allocate a variable extent near bno in the allocation group agno.
1565 * Extent's length (returned in len) will be between minlen and maxlen,
1566 * and of the form k * prod + mod unless there's nothing that large.
1567 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1568 */
1569 STATIC int
1570 xfs_alloc_ag_vextent_near(
1571 struct xfs_alloc_arg *args)
1572 {
1573 struct xfs_alloc_cur acur = {};
1574 int error; /* error code */
1575 int i; /* result code, temporary */
1576 xfs_agblock_t bno;
1577 xfs_extlen_t len;
1578
1579 /* handle uninitialized agbno range so caller doesn't have to */
1580 if (!args->min_agbno && !args->max_agbno)
1581 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1582 ASSERT(args->min_agbno <= args->max_agbno);
1583
1584 /* clamp agbno to the range if it's outside */
1585 if (args->agbno < args->min_agbno)
1586 args->agbno = args->min_agbno;
1587 if (args->agbno > args->max_agbno)
1588 args->agbno = args->max_agbno;
1589
1590 restart:
1591 len = 0;
1592
1593 /*
1594 * Set up cursors and see if there are any free extents as big as
1595 * maxlen. If not, pick the last entry in the tree unless the tree is
1596 * empty.
1597 */
1598 error = xfs_alloc_cur_setup(args, &acur);
1599 if (error == -ENOSPC) {
1600 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1601 &len, &i);
1602 if (error)
1603 goto out;
1604 if (i == 0 || len == 0) {
1605 trace_xfs_alloc_near_noentry(args);
1606 goto out;
1607 }
1608 ASSERT(i == 1);
1609 } else if (error) {
1610 goto out;
1611 }
1612
1613 /*
1614 * First algorithm.
1615 * If the requested extent is large wrt the freespaces available
1616 * in this a.g., then the cursor will be pointing to a btree entry
1617 * near the right edge of the tree. If it's in the last btree leaf
1618 * block, then we just examine all the entries in that block
1619 * that are big enough, and pick the best one.
1620 */
1621 if (xfs_btree_islastblock(acur.cnt, 0)) {
1622 bool allocated = false;
1623
1624 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1625 &allocated);
1626 if (error)
1627 goto out;
1628 if (allocated)
1629 goto alloc_finish;
1630 }
1631
1632 /*
1633 * Second algorithm. Combined cntbt and bnobt search to find ideal
1634 * locality.
1635 */
1636 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1637 if (error)
1638 goto out;
1639
1640 /*
1641 * If we couldn't get anything, give up.
1642 */
1643 if (!acur.len) {
1644 if (acur.busy) {
1645 trace_xfs_alloc_near_busy(args);
1646 xfs_extent_busy_flush(args->mp, args->pag,
1647 acur.busy_gen);
1648 goto restart;
1649 }
1650 trace_xfs_alloc_size_neither(args);
1651 args->agbno = NULLAGBLOCK;
1652 goto out;
1653 }
1654
1655 alloc_finish:
1656 /* fix up btrees on a successful allocation */
1657 error = xfs_alloc_cur_finish(args, &acur);
1658
1659 out:
1660 xfs_alloc_cur_close(&acur, error);
1661 return error;
1662 }
1663
1664 /*
1665 * Allocate a variable extent anywhere in the allocation group agno.
1666 * Extent's length (returned in len) will be between minlen and maxlen,
1667 * and of the form k * prod + mod unless there's nothing that large.
1668 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1669 */
1670 STATIC int /* error */
1671 xfs_alloc_ag_vextent_size(
1672 xfs_alloc_arg_t *args) /* allocation argument structure */
1673 {
1674 struct xfs_agf *agf = args->agbp->b_addr;
1675 struct xfs_btree_cur *bno_cur; /* cursor for bno btree */
1676 struct xfs_btree_cur *cnt_cur; /* cursor for cnt btree */
1677 int error; /* error result */
1678 xfs_agblock_t fbno; /* start of found freespace */
1679 xfs_extlen_t flen; /* length of found freespace */
1680 int i; /* temp status variable */
1681 xfs_agblock_t rbno; /* returned block number */
1682 xfs_extlen_t rlen; /* length of returned extent */
1683 bool busy;
1684 unsigned busy_gen;
1685
1686 restart:
1687 /*
1688 * Allocate and initialize a cursor for the by-size btree.
1689 */
1690 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1691 args->pag, XFS_BTNUM_CNT);
1692 bno_cur = NULL;
1693
1694 /*
1695 * Look for an entry >= maxlen+alignment-1 blocks.
1696 */
1697 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1698 args->maxlen + args->alignment - 1, &i)))
1699 goto error0;
1700
1701 /*
1702 * If none then we have to settle for a smaller extent. In the case that
1703 * there are no large extents, this will return the last entry in the
1704 * tree unless the tree is empty. In the case that there are only busy
1705 * large extents, this will return the largest small extent unless there
1706 * are no smaller extents available.
1707 */
1708 if (!i) {
1709 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1710 &fbno, &flen, &i);
1711 if (error)
1712 goto error0;
1713 if (i == 0 || flen == 0) {
1714 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1715 trace_xfs_alloc_size_noentry(args);
1716 return 0;
1717 }
1718 ASSERT(i == 1);
1719 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1720 &rlen, &busy_gen);
1721 } else {
1722 /*
1723 * Search for a non-busy extent that is large enough.
1724 */
1725 for (;;) {
1726 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1727 if (error)
1728 goto error0;
1729 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1730 error = -EFSCORRUPTED;
1731 goto error0;
1732 }
1733
1734 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1735 &rbno, &rlen, &busy_gen);
1736
1737 if (rlen >= args->maxlen)
1738 break;
1739
1740 error = xfs_btree_increment(cnt_cur, 0, &i);
1741 if (error)
1742 goto error0;
1743 if (i == 0) {
1744 /*
1745 * Our only valid extents must have been busy.
1746 * Make it unbusy by forcing the log out and
1747 * retrying.
1748 */
1749 xfs_btree_del_cursor(cnt_cur,
1750 XFS_BTREE_NOERROR);
1751 trace_xfs_alloc_size_busy(args);
1752 xfs_extent_busy_flush(args->mp,
1753 args->pag, busy_gen);
1754 goto restart;
1755 }
1756 }
1757 }
1758
1759 /*
1760 * In the first case above, we got the last entry in the
1761 * by-size btree. Now we check to see if the space hits maxlen
1762 * once aligned; if not, we search left for something better.
1763 * This can't happen in the second case above.
1764 */
1765 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1766 if (XFS_IS_CORRUPT(args->mp,
1767 rlen != 0 &&
1768 (rlen > flen ||
1769 rbno + rlen > fbno + flen))) {
1770 error = -EFSCORRUPTED;
1771 goto error0;
1772 }
1773 if (rlen < args->maxlen) {
1774 xfs_agblock_t bestfbno;
1775 xfs_extlen_t bestflen;
1776 xfs_agblock_t bestrbno;
1777 xfs_extlen_t bestrlen;
1778
1779 bestrlen = rlen;
1780 bestrbno = rbno;
1781 bestflen = flen;
1782 bestfbno = fbno;
1783 for (;;) {
1784 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1785 goto error0;
1786 if (i == 0)
1787 break;
1788 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1789 &i)))
1790 goto error0;
1791 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1792 error = -EFSCORRUPTED;
1793 goto error0;
1794 }
1795 if (flen < bestrlen)
1796 break;
1797 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1798 &rbno, &rlen, &busy_gen);
1799 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1800 if (XFS_IS_CORRUPT(args->mp,
1801 rlen != 0 &&
1802 (rlen > flen ||
1803 rbno + rlen > fbno + flen))) {
1804 error = -EFSCORRUPTED;
1805 goto error0;
1806 }
1807 if (rlen > bestrlen) {
1808 bestrlen = rlen;
1809 bestrbno = rbno;
1810 bestflen = flen;
1811 bestfbno = fbno;
1812 if (rlen == args->maxlen)
1813 break;
1814 }
1815 }
1816 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1817 &i)))
1818 goto error0;
1819 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1820 error = -EFSCORRUPTED;
1821 goto error0;
1822 }
1823 rlen = bestrlen;
1824 rbno = bestrbno;
1825 flen = bestflen;
1826 fbno = bestfbno;
1827 }
1828 args->wasfromfl = 0;
1829 /*
1830 * Fix up the length.
1831 */
1832 args->len = rlen;
1833 if (rlen < args->minlen) {
1834 if (busy) {
1835 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1836 trace_xfs_alloc_size_busy(args);
1837 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1838 goto restart;
1839 }
1840 goto out_nominleft;
1841 }
1842 xfs_alloc_fix_len(args);
1843
1844 rlen = args->len;
1845 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1846 error = -EFSCORRUPTED;
1847 goto error0;
1848 }
1849 /*
1850 * Allocate and initialize a cursor for the by-block tree.
1851 */
1852 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1853 args->pag, XFS_BTNUM_BNO);
1854 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1855 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1856 goto error0;
1857 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1858 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1859 cnt_cur = bno_cur = NULL;
1860 args->len = rlen;
1861 args->agbno = rbno;
1862 if (XFS_IS_CORRUPT(args->mp,
1863 args->agbno + args->len >
1864 be32_to_cpu(agf->agf_length))) {
1865 error = -EFSCORRUPTED;
1866 goto error0;
1867 }
1868 trace_xfs_alloc_size_done(args);
1869 return 0;
1870
1871 error0:
1872 trace_xfs_alloc_size_error(args);
1873 if (cnt_cur)
1874 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1875 if (bno_cur)
1876 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1877 return error;
1878
1879 out_nominleft:
1880 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1881 trace_xfs_alloc_size_nominleft(args);
1882 args->agbno = NULLAGBLOCK;
1883 return 0;
1884 }
1885
1886 /*
1887 * Free the extent starting at agno/bno for length.
1888 */
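/*
 * Illustrative example (hypothetical block numbers): freeing blocks
 * [100, 107] while the by-block tree already holds free extents [90, 99]
 * and [108, 119] takes the haveleft && haveright path below; both old
 * by-size records are deleted, the right-hand by-block record is deleted,
 * and the left-hand record is rewritten as a single [90, 119] extent of
 * length 30.
 */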
1889 STATIC int
1890 xfs_free_ag_extent(
1891 struct xfs_trans *tp,
1892 struct xfs_buf *agbp,
1893 xfs_agnumber_t agno,
1894 xfs_agblock_t bno,
1895 xfs_extlen_t len,
1896 const struct xfs_owner_info *oinfo,
1897 enum xfs_ag_resv_type type)
1898 {
1899 struct xfs_mount *mp;
1900 struct xfs_btree_cur *bno_cur;
1901 struct xfs_btree_cur *cnt_cur;
1902 xfs_agblock_t gtbno; /* start of right neighbor */
1903 xfs_extlen_t gtlen; /* length of right neighbor */
1904 xfs_agblock_t ltbno; /* start of left neighbor */
1905 xfs_extlen_t ltlen; /* length of left neighbor */
1906 xfs_agblock_t nbno; /* new starting block of freesp */
1907 xfs_extlen_t nlen; /* new length of freespace */
1908 int haveleft; /* have a left neighbor */
1909 int haveright; /* have a right neighbor */
1910 int i;
1911 int error;
1912 struct xfs_perag *pag = agbp->b_pag;
1913
1914 bno_cur = cnt_cur = NULL;
1915 mp = tp->t_mountp;
1916
1917 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1918 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
1919 if (error)
1920 goto error0;
1921 }
1922
1923 /*
1924 * Allocate and initialize a cursor for the by-block btree.
1925 */
1926 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
1927 /*
1928 * Look for a neighboring block on the left (lower block numbers)
1929 * that is contiguous with this space.
1930 */
1931 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1932 goto error0;
1933 if (haveleft) {
1934 /*
1935 * There is a block to our left.
1936 */
1937 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1938 goto error0;
1939 if (XFS_IS_CORRUPT(mp, i != 1)) {
1940 error = -EFSCORRUPTED;
1941 goto error0;
1942 }
1943 /*
1944 * It's not contiguous, though.
1945 */
1946 if (ltbno + ltlen < bno)
1947 haveleft = 0;
1948 else {
1949 /*
1950 * If this failure happens, the request to free this
1951 * space was invalid, it's (partly) already free.
1952 * Very bad.
1953 */
1954 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
1955 error = -EFSCORRUPTED;
1956 goto error0;
1957 }
1958 }
1959 }
1960 /*
1961 * Look for a neighboring block on the right (higher block numbers)
1962 * that is contiguous with this space.
1963 */
1964 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1965 goto error0;
1966 if (haveright) {
1967 /*
1968 * There is a block to our right.
1969 */
1970 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1971 goto error0;
1972 if (XFS_IS_CORRUPT(mp, i != 1)) {
1973 error = -EFSCORRUPTED;
1974 goto error0;
1975 }
1976 /*
1977 * It's not contiguous, though.
1978 */
1979 if (bno + len < gtbno)
1980 haveright = 0;
1981 else {
1982 /*
1983 * If this failure happens, the request to free this
1984 * space was invalid, it's (partly) already free.
1985 * Very bad.
1986 */
1987 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
1988 error = -EFSCORRUPTED;
1989 goto error0;
1990 }
1991 }
1992 }
1993 /*
1994 * Now allocate and initialize a cursor for the by-size tree.
1995 */
1996 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
1997 /*
1998 * Have both left and right contiguous neighbors.
1999 * Merge all three into a single free block.
2000 */
2001 if (haveleft && haveright) {
2002 /*
2003 * Delete the old by-size entry on the left.
2004 */
2005 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2006 goto error0;
2007 if (XFS_IS_CORRUPT(mp, i != 1)) {
2008 error = -EFSCORRUPTED;
2009 goto error0;
2010 }
2011 if ((error = xfs_btree_delete(cnt_cur, &i)))
2012 goto error0;
2013 if (XFS_IS_CORRUPT(mp, i != 1)) {
2014 error = -EFSCORRUPTED;
2015 goto error0;
2016 }
2017 /*
2018 * Delete the old by-size entry on the right.
2019 */
2020 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2021 goto error0;
2022 if (XFS_IS_CORRUPT(mp, i != 1)) {
2023 error = -EFSCORRUPTED;
2024 goto error0;
2025 }
2026 if ((error = xfs_btree_delete(cnt_cur, &i)))
2027 goto error0;
2028 if (XFS_IS_CORRUPT(mp, i != 1)) {
2029 error = -EFSCORRUPTED;
2030 goto error0;
2031 }
2032 /*
2033 * Delete the old by-block entry for the right block.
2034 */
2035 if ((error = xfs_btree_delete(bno_cur, &i)))
2036 goto error0;
2037 if (XFS_IS_CORRUPT(mp, i != 1)) {
2038 error = -EFSCORRUPTED;
2039 goto error0;
2040 }
2041 /*
2042 * Move the by-block cursor back to the left neighbor.
2043 */
2044 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2045 goto error0;
2046 if (XFS_IS_CORRUPT(mp, i != 1)) {
2047 error = -EFSCORRUPTED;
2048 goto error0;
2049 }
2050 #ifdef DEBUG
2051 /*
2052 * Check that this is the right record: delete didn't
2053 * mangle the cursor.
2054 */
2055 {
2056 xfs_agblock_t xxbno;
2057 xfs_extlen_t xxlen;
2058
2059 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2060 &i)))
2061 goto error0;
2062 if (XFS_IS_CORRUPT(mp,
2063 i != 1 ||
2064 xxbno != ltbno ||
2065 xxlen != ltlen)) {
2066 error = -EFSCORRUPTED;
2067 goto error0;
2068 }
2069 }
2070 #endif
2071 /*
2072 * Update remaining by-block entry to the new, joined block.
2073 */
2074 nbno = ltbno;
2075 nlen = len + ltlen + gtlen;
2076 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2077 goto error0;
2078 }
2079 /*
2080 * Have only a left contiguous neighbor.
2081 * Merge it together with the new freespace.
2082 */
2083 else if (haveleft) {
2084 /*
2085 * Delete the old by-size entry on the left.
2086 */
2087 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2088 goto error0;
2089 if (XFS_IS_CORRUPT(mp, i != 1)) {
2090 error = -EFSCORRUPTED;
2091 goto error0;
2092 }
2093 if ((error = xfs_btree_delete(cnt_cur, &i)))
2094 goto error0;
2095 if (XFS_IS_CORRUPT(mp, i != 1)) {
2096 error = -EFSCORRUPTED;
2097 goto error0;
2098 }
2099 /*
2100 * Back up the by-block cursor to the left neighbor, and
2101 * update its length.
2102 */
2103 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2104 goto error0;
2105 if (XFS_IS_CORRUPT(mp, i != 1)) {
2106 error = -EFSCORRUPTED;
2107 goto error0;
2108 }
2109 nbno = ltbno;
2110 nlen = len + ltlen;
2111 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2112 goto error0;
2113 }
2114 /*
2115 * Have only a right contiguous neighbor.
2116 * Merge it together with the new freespace.
2117 */
2118 else if (haveright) {
2119 /*
2120 * Delete the old by-size entry on the right.
2121 */
2122 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2123 goto error0;
2124 if (XFS_IS_CORRUPT(mp, i != 1)) {
2125 error = -EFSCORRUPTED;
2126 goto error0;
2127 }
2128 if ((error = xfs_btree_delete(cnt_cur, &i)))
2129 goto error0;
2130 if (XFS_IS_CORRUPT(mp, i != 1)) {
2131 error = -EFSCORRUPTED;
2132 goto error0;
2133 }
2134 /*
2135 * Update the starting block and length of the right
2136 * neighbor in the by-block tree.
2137 */
2138 nbno = bno;
2139 nlen = len + gtlen;
2140 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2141 goto error0;
2142 }
2143 /*
2144 * No contiguous neighbors.
2145 * Insert the new freespace into the by-block tree.
2146 */
2147 else {
2148 nbno = bno;
2149 nlen = len;
2150 if ((error = xfs_btree_insert(bno_cur, &i)))
2151 goto error0;
2152 if (XFS_IS_CORRUPT(mp, i != 1)) {
2153 error = -EFSCORRUPTED;
2154 goto error0;
2155 }
2156 }
2157 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2158 bno_cur = NULL;
2159 /*
2160 * In all cases we need to insert the new freespace in the by-size tree.
2161 */
2162 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2163 goto error0;
2164 if (XFS_IS_CORRUPT(mp, i != 0)) {
2165 error = -EFSCORRUPTED;
2166 goto error0;
2167 }
2168 if ((error = xfs_btree_insert(cnt_cur, &i)))
2169 goto error0;
2170 if (XFS_IS_CORRUPT(mp, i != 1)) {
2171 error = -EFSCORRUPTED;
2172 goto error0;
2173 }
2174 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2175 cnt_cur = NULL;
2176
2177 /*
2178 * Update the freespace totals in the ag and superblock.
2179 */
2180 error = xfs_alloc_update_counters(tp, agbp, len);
2181 xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2182 if (error)
2183 goto error0;
2184
2185 XFS_STATS_INC(mp, xs_freex);
2186 XFS_STATS_ADD(mp, xs_freeb, len);
2187
2188 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2189
2190 return 0;
2191
2192 error0:
2193 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2194 if (bno_cur)
2195 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2196 if (cnt_cur)
2197 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2198 return error;
2199 }
2200
2201 /*
2202 * Visible (exported) allocation/free functions.
2203 * Some of these are used just by xfs_alloc_btree.c and this file.
2204 */
2205
2206 /*
2207 * Compute and fill in value of m_alloc_maxlevels.
2208 */
2209 void
2210 xfs_alloc_compute_maxlevels(
2211 xfs_mount_t *mp) /* file system mount structure */
2212 {
2213 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2214 (mp->m_sb.sb_agblocks + 1) / 2);
2215 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2216 }
2217
2218 /*
2219 * Find the length of the longest extent in an AG. The 'need' parameter
2220 * specifies how much space we're going to need for the AGFL and the
2221 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2222 * other callers.
2223 */
2224 xfs_extlen_t
2225 xfs_alloc_longest_free_extent(
2226 struct xfs_perag *pag,
2227 xfs_extlen_t need,
2228 xfs_extlen_t reserved)
2229 {
2230 xfs_extlen_t delta = 0;
2231
2232 /*
2233 * If the AGFL needs a recharge, we'll have to subtract that from the
2234 * longest extent.
2235 */
2236 if (need > pag->pagf_flcount)
2237 delta = need - pag->pagf_flcount;
2238
2239 /*
2240 * If we cannot maintain others' reservations with space from the
2241 * not-longest freesp extents, we'll have to subtract /that/ from
2242 * the longest extent too.
2243 */
2244 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2245 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2246
2247 /*
2248 * If the longest extent is long enough to satisfy all the
2249 * reservations and AGFL rules in place, we can return this extent.
2250 */
2251 if (pag->pagf_longest > delta)
2252 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2253 pag->pagf_longest - delta);
2254
2255 /* Otherwise, let the caller try for 1 block if there's space. */
2256 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2257 }
2258
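/*
 * Illustrative example (hypothetical numbers): with pagf_longest = 100,
 * pagf_freeblks = 120, an AGFL that still needs 6 blocks (need = 6,
 * pagf_flcount = 0) and reserved = 30, delta is 6 for the AGFL refill plus
 * 30 - (120 - 100) = 10 because the non-longest free space cannot cover
 * the reservation by itself, so the usable longest extent is
 * 100 - 16 = 84 blocks (further capped at m_ag_max_usable).
 */
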
2259 /*
2260 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2261 * return the largest possible minimum length.
2262 */
2263 unsigned int
2264 xfs_alloc_min_freelist(
2265 struct xfs_mount *mp,
2266 struct xfs_perag *pag)
2267 {
2268 /* AG btrees have at least 1 level. */
2269 static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2270 const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
2271 unsigned int min_free;
2272
2273 ASSERT(mp->m_alloc_maxlevels > 0);
2274
2275 /* space needed by-bno freespace btree */
2276 min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2277 mp->m_alloc_maxlevels);
2278 /* space needed by-size freespace btree */
2279 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2280 mp->m_alloc_maxlevels);
2281 /* space needed reverse mapping used space btree */
2282 if (xfs_has_rmapbt(mp))
2283 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2284 mp->m_rmap_maxlevels);
2285
2286 return min_free;
2287 }
2288
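/*
 * Illustrative example (hypothetical numbers): with a 2-level bnobt, a
 * 2-level cntbt and a 3-level rmapbt, and neither clamp applying, the
 * minimum AGFL length is (2 + 1) + (2 + 1) + (3 + 1) = 10 blocks.
 */
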
2289 /*
2290 * Check if the operation we are fixing up the freelist for should go ahead or
2291 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2292 * is dependent on whether the size and shape of free space available will
2293 * permit the requested allocation to take place.
2294 */
2295 static bool
2296 xfs_alloc_space_available(
2297 struct xfs_alloc_arg *args,
2298 xfs_extlen_t min_free,
2299 int flags)
2300 {
2301 struct xfs_perag *pag = args->pag;
2302 xfs_extlen_t alloc_len, longest;
2303 xfs_extlen_t reservation; /* blocks that are still reserved */
2304 int available;
2305 xfs_extlen_t agflcount;
2306
2307 if (flags & XFS_ALLOC_FLAG_FREEING)
2308 return true;
2309
2310 reservation = xfs_ag_resv_needed(pag, args->resv);
2311
2312 /* do we have enough contiguous free space for the allocation? */
2313 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2314 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2315 if (longest < alloc_len)
2316 return false;
2317
2318 /*
2319 * Do we have enough free space remaining for the allocation? Don't
2320 * account extra agfl blocks because we are about to defer freeing them,
2321 * making them unavailable until the current transaction commits.
2322 */
2323 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2324 available = (int)(pag->pagf_freeblks + agflcount -
2325 reservation - min_free - args->minleft);
2326 if (available < (int)max(args->total, alloc_len))
2327 return false;
2328
2329 /*
2330 * Clamp maxlen to the amount of free space available for the actual
2331 * extent allocation.
2332 */
2333 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2334 args->maxlen = available;
2335 ASSERT(args->maxlen > 0);
2336 ASSERT(args->maxlen >= args->minlen);
2337 }
2338
2339 return true;
2340 }
2341
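/*
 * Illustrative example (hypothetical numbers): for a request with
 * minlen = 8, alignment = 4, minalignslop = 0, total = 16 and minleft = 4,
 * alloc_len is 8 + 3 = 11.  With pagf_freeblks = 40, pagf_flcount = 2,
 * min_free = 10 and reservation = 5, available is
 * 40 + 2 - 5 - 10 - 4 = 23, which passes the max(16, 11) = 16 check, so
 * the allocation may proceed as long as the longest free extent is at
 * least 11 blocks.
 */
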
2342 int
2343 xfs_free_agfl_block(
2344 struct xfs_trans *tp,
2345 xfs_agnumber_t agno,
2346 xfs_agblock_t agbno,
2347 struct xfs_buf *agbp,
2348 struct xfs_owner_info *oinfo)
2349 {
2350 int error;
2351 struct xfs_buf *bp;
2352
2353 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2354 XFS_AG_RESV_AGFL);
2355 if (error)
2356 return error;
2357
2358 error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2359 XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2360 tp->t_mountp->m_bsize, 0, &bp);
2361 if (error)
2362 return error;
2363 xfs_trans_binval(tp, bp);
2364
2365 return 0;
2366 }
2367
2368 /*
2369 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2370 * is to detect an agfl header padding mismatch between current and early v5
2371 * kernels. This problem manifests as a 1-slot size difference between the
2372 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2373 * may also catch variants of agfl count corruption unrelated to padding. Either
2374 * way, we'll reset the agfl and warn the user.
2375 *
2376 * Return true if a reset is required before the agfl can be used, false
2377 * otherwise.
2378 */
2379 static bool
2380 xfs_agfl_needs_reset(
2381 struct xfs_mount *mp,
2382 struct xfs_agf *agf)
2383 {
2384 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2385 uint32_t l = be32_to_cpu(agf->agf_fllast);
2386 uint32_t c = be32_to_cpu(agf->agf_flcount);
2387 int agfl_size = xfs_agfl_size(mp);
2388 int active;
2389
2390 /* no agfl header on v4 supers */
2391 if (!xfs_has_crc(mp))
2392 return false;
2393
2394 /*
2395 * The agf read verifier catches severe corruption of these fields.
2396 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2397 * the verifier allows it.
2398 */
2399 if (f >= agfl_size || l >= agfl_size)
2400 return true;
2401 if (c > agfl_size)
2402 return true;
2403
2404 /*
2405 * Check consistency between the on-disk count and the active range. An
2406 * agfl padding mismatch manifests as an inconsistent flcount.
2407 */
2408 if (c && l >= f)
2409 active = l - f + 1;
2410 else if (c)
2411 active = agfl_size - f + l + 1;
2412 else
2413 active = 0;
2414
2415 return active != c;
2416 }
2417
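/*
 * Illustrative example (hypothetical numbers): with a 119-slot AGFL,
 * flfirst = 117, fllast = 1 and flcount = 4, the wrapped active range is
 * 119 - 117 + 1 + 1 = 4 slots, matching flcount, so no reset is needed.
 * If a padding mismatch made flcount read back as 5, the two would
 * disagree and the AGFL would be reset.
 */
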
2418 /*
2419 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2420 * agfl content cannot be trusted. Warn the user that a repair is required to
2421 * recover leaked blocks.
2422 *
2423 * The purpose of this mechanism is to handle filesystems affected by the agfl
2424 * header padding mismatch problem. A reset keeps the filesystem online with a
2425 * relatively minor free space accounting inconsistency rather than suffer the
2426 * inevitable crash from use of an invalid agfl block.
2427 */
2428 static void
2429 xfs_agfl_reset(
2430 struct xfs_trans *tp,
2431 struct xfs_buf *agbp,
2432 struct xfs_perag *pag)
2433 {
2434 struct xfs_mount *mp = tp->t_mountp;
2435 struct xfs_agf *agf = agbp->b_addr;
2436
2437 ASSERT(pag->pagf_agflreset);
2438 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2439
2440 xfs_warn(mp,
2441 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2442 "Please unmount and run xfs_repair.",
2443 pag->pag_agno, pag->pagf_flcount);
2444
2445 agf->agf_flfirst = 0;
2446 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2447 agf->agf_flcount = 0;
2448 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2449 XFS_AGF_FLCOUNT);
2450
2451 pag->pagf_flcount = 0;
2452 pag->pagf_agflreset = false;
2453 }
2454
2455 /*
2456 * Defer an AGFL block free. This is effectively equivalent to
2457 * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2458 *
2459 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2460 * allocation operations in a transaction. AGFL frees are prone to this problem
2461 * because for one they are always freed one at a time. Further, an immediate
2462 * AGFL block free can cause a btree join and require another block free before
2463 * the real allocation can proceed. Deferring the free disconnects freeing up
2464 * the AGFL slot from freeing the block.
2465 */
2466 STATIC void
2467 xfs_defer_agfl_block(
2468 struct xfs_trans *tp,
2469 xfs_agnumber_t agno,
2470 xfs_fsblock_t agbno,
2471 struct xfs_owner_info *oinfo)
2472 {
2473 struct xfs_mount *mp = tp->t_mountp;
2474 struct xfs_extent_free_item *new; /* new element */
2475
2476 ASSERT(xfs_extfree_item_cache != NULL);
2477 ASSERT(oinfo != NULL);
2478
2479 new = kmem_cache_zalloc(xfs_extfree_item_cache,
2480 GFP_KERNEL | __GFP_NOFAIL);
2481 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2482 new->xefi_blockcount = 1;
2483 new->xefi_owner = oinfo->oi_owner;
2484
2485 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2486
2487 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2488 }
2489
2490 /*
2491 * Add the extent to the list of extents to be freed at transaction end.
2492 * The list is maintained sorted (by block number).
2493 */
2494 void
2495 __xfs_free_extent_later(
2496 struct xfs_trans *tp,
2497 xfs_fsblock_t bno,
2498 xfs_filblks_t len,
2499 const struct xfs_owner_info *oinfo,
2500 bool skip_discard)
2501 {
2502 struct xfs_extent_free_item *new; /* new element */
2503 #ifdef DEBUG
2504 struct xfs_mount *mp = tp->t_mountp;
2505 xfs_agnumber_t agno;
2506 xfs_agblock_t agbno;
2507
2508 ASSERT(bno != NULLFSBLOCK);
2509 ASSERT(len > 0);
2510 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2511 ASSERT(!isnullstartblock(bno));
2512 agno = XFS_FSB_TO_AGNO(mp, bno);
2513 agbno = XFS_FSB_TO_AGBNO(mp, bno);
2514 ASSERT(agno < mp->m_sb.sb_agcount);
2515 ASSERT(agbno < mp->m_sb.sb_agblocks);
2516 ASSERT(len < mp->m_sb.sb_agblocks);
2517 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2518 #endif
2519 ASSERT(xfs_extfree_item_cache != NULL);
2520
2521 new = kmem_cache_zalloc(xfs_extfree_item_cache,
2522 GFP_KERNEL | __GFP_NOFAIL);
2523 new->xefi_startblock = bno;
2524 new->xefi_blockcount = (xfs_extlen_t)len;
2525 if (skip_discard)
2526 new->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2527 if (oinfo) {
2528 ASSERT(oinfo->oi_offset == 0);
2529
2530 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2531 new->xefi_flags |= XFS_EFI_ATTR_FORK;
2532 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2533 new->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2534 new->xefi_owner = oinfo->oi_owner;
2535 } else {
2536 new->xefi_owner = XFS_RMAP_OWN_NULL;
2537 }
2538 trace_xfs_bmap_free_defer(tp->t_mountp,
2539 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2540 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2541 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
2542 }
2543
2544 #ifdef DEBUG
2545 /*
2546 * Check if an AGF has a free extent record whose length is equal to
2547 * args->minlen.
2548 */
2549 STATIC int
2550 xfs_exact_minlen_extent_available(
2551 struct xfs_alloc_arg *args,
2552 struct xfs_buf *agbp,
2553 int *stat)
2554 {
2555 struct xfs_btree_cur *cnt_cur;
2556 xfs_agblock_t fbno;
2557 xfs_extlen_t flen;
2558 int error = 0;
2559
2560 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2561 args->pag, XFS_BTNUM_CNT);
2562 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2563 if (error)
2564 goto out;
2565
2566 if (*stat == 0) {
2567 error = -EFSCORRUPTED;
2568 goto out;
2569 }
2570
2571 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2572 if (error)
2573 goto out;
2574
2575 if (*stat == 1 && flen != args->minlen)
2576 *stat = 0;
2577
2578 out:
2579 xfs_btree_del_cursor(cnt_cur, error);
2580
2581 return error;
2582 }
2583 #endif
2584
2585 /*
2586 * Decide whether to use this allocation group for this allocation.
2587 * If so, fix up the btree freelist's size.
2588 */
2589 int /* error */
2590 xfs_alloc_fix_freelist(
2591 struct xfs_alloc_arg *args, /* allocation argument structure */
2592 int flags) /* XFS_ALLOC_FLAG_... */
2593 {
2594 struct xfs_mount *mp = args->mp;
2595 struct xfs_perag *pag = args->pag;
2596 struct xfs_trans *tp = args->tp;
2597 struct xfs_buf *agbp = NULL;
2598 struct xfs_buf *agflbp = NULL;
2599 struct xfs_alloc_arg targs; /* local allocation arguments */
2600 xfs_agblock_t bno; /* freelist block */
2601 xfs_extlen_t need; /* total blocks needed in freelist */
2602 int error = 0;
2603
2604 /* deferred ops (AGFL block frees) require permanent transactions */
2605 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2606
2607 if (!pag->pagf_init) {
2608 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2609 if (error) {
2610 /* Couldn't lock the AGF so skip this AG. */
2611 if (error == -EAGAIN)
2612 error = 0;
2613 goto out_no_agbp;
2614 }
2615 }
2616
2617 /*
2618 * If this is a metadata preferred pag and we are allocating user
2619 * data, try somewhere else unless we are being asked to try harder
2620 * at this point.
2621 */
2622 if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
2623 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2624 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2625 goto out_agbp_relse;
2626 }
2627
2628 need = xfs_alloc_min_freelist(mp, pag);
2629 if (!xfs_alloc_space_available(args, need, flags |
2630 XFS_ALLOC_FLAG_CHECK))
2631 goto out_agbp_relse;
2632
2633 /*
2634 * Get the a.g. freespace buffer.
2635 * Can fail if we're not blocking on locks, and it's held.
2636 */
2637 if (!agbp) {
2638 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2639 if (error) {
2640 /* Couldn't lock the AGF so skip this AG. */
2641 if (error == -EAGAIN)
2642 error = 0;
2643 goto out_no_agbp;
2644 }
2645 }
2646
2647 /* reset a padding mismatched agfl before final free space check */
2648 if (pag->pagf_agflreset)
2649 xfs_agfl_reset(tp, agbp, pag);
2650
2651 /* If there isn't enough total space or a long-enough single extent, reject it. */
2652 need = xfs_alloc_min_freelist(mp, pag);
2653 if (!xfs_alloc_space_available(args, need, flags))
2654 goto out_agbp_relse;
2655
2656 #ifdef DEBUG
2657 if (args->alloc_minlen_only) {
2658 int stat;
2659
2660 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2661 if (error || !stat)
2662 goto out_agbp_relse;
2663 }
2664 #endif
2665 /*
2666 * Make the freelist shorter if it's too long.
2667 *
2668 * Note that from this point onwards, we will always release the agf and
2669 * agfl buffers on error. This handles the case where we error out and
2670 * the buffers are clean or may not have been joined to the transaction
2671 * and hence need to be released manually. If they have been joined to
2672 * the transaction, then xfs_trans_brelse() will handle them
2673 * appropriately based on the recursion count and dirty state of the
2674 * buffer.
2675 *
2676 * XXX (dgc): When we have lots of free space, does this buy us
2677 * anything other than extra overhead when we need to put more blocks
2678 * back on the free list? Maybe we should only do this when space is
2679 * getting low or the AGFL is more than half full?
2680 *
2681 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2682 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2683 * updating the rmapbt. Both flags are used in xfs_repair while we're
2684 * rebuilding the rmapbt, and neither are used by the kernel. They're
2685 * both required to ensure that rmaps are correctly recorded for the
2686 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2687 * repair/rmap.c in xfsprogs for details.
2688 */
2689 memset(&targs, 0, sizeof(targs));
2690 /* struct copy below */
2691 if (flags & XFS_ALLOC_FLAG_NORMAP)
2692 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2693 else
2694 targs.oinfo = XFS_RMAP_OINFO_AG;
2695 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2696 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2697 if (error)
2698 goto out_agbp_relse;
2699
2700 /* defer agfl frees */
2701 xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2702 }
2703
2704 targs.tp = tp;
2705 targs.mp = mp;
2706 targs.agbp = agbp;
2707 targs.agno = args->agno;
2708 targs.alignment = targs.minlen = targs.prod = 1;
2709 targs.type = XFS_ALLOCTYPE_THIS_AG;
2710 targs.pag = pag;
2711 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2712 if (error)
2713 goto out_agbp_relse;
2714
2715 /* Make the freelist longer if it's too short. */
2716 while (pag->pagf_flcount < need) {
2717 targs.agbno = 0;
2718 targs.maxlen = need - pag->pagf_flcount;
2719 targs.resv = XFS_AG_RESV_AGFL;
2720
2721 /* Allocate as many blocks as possible at once. */
2722 error = xfs_alloc_ag_vextent(&targs);
2723 if (error)
2724 goto out_agflbp_relse;
2725
2726 /*
2727 * Stop if we run out. Won't happen if callers are obeying
2728 * the restrictions correctly. Can happen for free calls
2729 * on a completely full ag.
2730 */
2731 if (targs.agbno == NULLAGBLOCK) {
2732 if (flags & XFS_ALLOC_FLAG_FREEING)
2733 break;
2734 goto out_agflbp_relse;
2735 }
2736 /*
2737 * Put each allocated block on the list.
2738 */
2739 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2740 error = xfs_alloc_put_freelist(tp, agbp,
2741 agflbp, bno, 0);
2742 if (error)
2743 goto out_agflbp_relse;
2744 }
2745 }
2746 xfs_trans_brelse(tp, agflbp);
2747 args->agbp = agbp;
2748 return 0;
2749
2750 out_agflbp_relse:
2751 xfs_trans_brelse(tp, agflbp);
2752 out_agbp_relse:
2753 if (agbp)
2754 xfs_trans_brelse(tp, agbp);
2755 out_no_agbp:
2756 args->agbp = NULL;
2757 return error;
2758 }
2759
2760 /*
2761 * Get a block from the freelist.
2762 * Returns the block number in *bnop, or NULLAGBLOCK if the freelist is empty.
2763 */
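/*
 * Illustrative example (hypothetical numbers): with a 119-slot AGFL and
 * agf_flfirst = 118, consuming that slot advances flfirst to 119, which
 * equals xfs_agfl_size(), so it wraps back to slot 0 while agf_flcount
 * drops by one.
 */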
2764 int
2765 xfs_alloc_get_freelist(
2766 struct xfs_trans *tp,
2767 struct xfs_buf *agbp,
2768 xfs_agblock_t *bnop,
2769 int btreeblk)
2770 {
2771 struct xfs_agf *agf = agbp->b_addr;
2772 struct xfs_buf *agflbp;
2773 xfs_agblock_t bno;
2774 __be32 *agfl_bno;
2775 int error;
2776 int logflags;
2777 struct xfs_mount *mp = tp->t_mountp;
2778 struct xfs_perag *pag;
2779
2780 /*
2781 * Freelist is empty, give up.
2782 */
2783 if (!agf->agf_flcount) {
2784 *bnop = NULLAGBLOCK;
2785 return 0;
2786 }
2787 /*
2788 * Read the array of free blocks.
2789 */
2790 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2791 &agflbp);
2792 if (error)
2793 return error;
2794
2795
2796 /*
2797 * Get the block number and update the data structures.
2798 */
2799 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2800 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2801 be32_add_cpu(&agf->agf_flfirst, 1);
2802 xfs_trans_brelse(tp, agflbp);
2803 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2804 agf->agf_flfirst = 0;
2805
2806 pag = agbp->b_pag;
2807 ASSERT(!pag->pagf_agflreset);
2808 be32_add_cpu(&agf->agf_flcount, -1);
2809 pag->pagf_flcount--;
2810
2811 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2812 if (btreeblk) {
2813 be32_add_cpu(&agf->agf_btreeblks, 1);
2814 pag->pagf_btreeblks++;
2815 logflags |= XFS_AGF_BTREEBLKS;
2816 }
2817
2818 xfs_alloc_log_agf(tp, agbp, logflags);
2819 *bnop = bno;
2820
2821 return 0;
2822 }
2823
2824 /*
2825 * Log the given fields from the agf structure.
2826 */
2827 void
2828 xfs_alloc_log_agf(
2829 xfs_trans_t *tp, /* transaction pointer */
2830 struct xfs_buf *bp, /* buffer for a.g. freelist header */
2831 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2832 {
2833 int first; /* first byte offset */
2834 int last; /* last byte offset */
2835 static const short offsets[] = {
2836 offsetof(xfs_agf_t, agf_magicnum),
2837 offsetof(xfs_agf_t, agf_versionnum),
2838 offsetof(xfs_agf_t, agf_seqno),
2839 offsetof(xfs_agf_t, agf_length),
2840 offsetof(xfs_agf_t, agf_roots[0]),
2841 offsetof(xfs_agf_t, agf_levels[0]),
2842 offsetof(xfs_agf_t, agf_flfirst),
2843 offsetof(xfs_agf_t, agf_fllast),
2844 offsetof(xfs_agf_t, agf_flcount),
2845 offsetof(xfs_agf_t, agf_freeblks),
2846 offsetof(xfs_agf_t, agf_longest),
2847 offsetof(xfs_agf_t, agf_btreeblks),
2848 offsetof(xfs_agf_t, agf_uuid),
2849 offsetof(xfs_agf_t, agf_rmap_blocks),
2850 offsetof(xfs_agf_t, agf_refcount_blocks),
2851 offsetof(xfs_agf_t, agf_refcount_root),
2852 offsetof(xfs_agf_t, agf_refcount_level),
2853 /* needed so that we don't log the whole rest of the structure: */
2854 offsetof(xfs_agf_t, agf_spare64),
2855 sizeof(xfs_agf_t)
2856 };
2857
2858 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
2859
2860 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2861
2862 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2863 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2864 }
2865
2866 /*
2867 * Interface for inode allocation to force the pag data to be initialized.
2868 */
2869 int /* error */
2870 xfs_alloc_pagf_init(
2871 xfs_mount_t *mp, /* file system mount structure */
2872 xfs_trans_t *tp, /* transaction pointer */
2873 xfs_agnumber_t agno, /* allocation group number */
2874 int flags) /* XFS_ALLOC_FLAGS_... */
2875 {
2876 struct xfs_buf *bp;
2877 int error;
2878
2879 error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
2880 if (!error)
2881 xfs_trans_brelse(tp, bp);
2882 return error;
2883 }
2884
2885 /*
2886 * Put the block on the freelist for the allocation group.
2887 */
2888 int
2889 xfs_alloc_put_freelist(
2890 struct xfs_trans *tp,
2891 struct xfs_buf *agbp,
2892 struct xfs_buf *agflbp,
2893 xfs_agblock_t bno,
2894 int btreeblk)
2895 {
2896 struct xfs_mount *mp = tp->t_mountp;
2897 struct xfs_agf *agf = agbp->b_addr;
2898 struct xfs_perag *pag;
2899 __be32 *blockp;
2900 int error;
2901 int logflags;
2902 __be32 *agfl_bno;
2903 int startoff;
2904
2905 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2906 be32_to_cpu(agf->agf_seqno), &agflbp)))
2907 return error;
2908 be32_add_cpu(&agf->agf_fllast, 1);
2909 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2910 agf->agf_fllast = 0;
2911
2912 pag = agbp->b_pag;
2913 ASSERT(!pag->pagf_agflreset);
2914 be32_add_cpu(&agf->agf_flcount, 1);
2915 pag->pagf_flcount++;
2916
2917 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2918 if (btreeblk) {
2919 be32_add_cpu(&agf->agf_btreeblks, -1);
2920 pag->pagf_btreeblks--;
2921 logflags |= XFS_AGF_BTREEBLKS;
2922 }
2923
2924 xfs_alloc_log_agf(tp, agbp, logflags);
2925
2926 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2927
2928 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2929 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2930 *blockp = cpu_to_be32(bno);
2931 startoff = (char *)blockp - (char *)agflbp->b_addr;
2932
2933 xfs_alloc_log_agf(tp, agbp, logflags);
2934
2935 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2936 xfs_trans_log_buf(tp, agflbp, startoff,
2937 startoff + sizeof(xfs_agblock_t) - 1);
2938 return 0;
2939 }
2940
2941 static xfs_failaddr_t
2942 xfs_agf_verify(
2943 struct xfs_buf *bp)
2944 {
2945 struct xfs_mount *mp = bp->b_mount;
2946 struct xfs_agf *agf = bp->b_addr;
2947
2948 if (xfs_has_crc(mp)) {
2949 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2950 return __this_address;
2951 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
2952 return __this_address;
2953 }
2954
2955 if (!xfs_verify_magic(bp, agf->agf_magicnum))
2956 return __this_address;
2957
2958 if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2959 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2960 be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2961 be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2962 be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2963 return __this_address;
2964
2965 if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
2966 return __this_address;
2967
2968 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
2969 be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
2970 return __this_address;
2971
2972 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2973 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2974 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
2975 mp->m_alloc_maxlevels ||
2976 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
2977 mp->m_alloc_maxlevels)
2978 return __this_address;
2979
2980 if (xfs_has_rmapbt(mp) &&
2981 (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2982 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
2983 mp->m_rmap_maxlevels))
2984 return __this_address;
2985
2986 if (xfs_has_rmapbt(mp) &&
2987 be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
2988 return __this_address;
2989
2990 /*
2991 * during growfs operations, the perag is not fully initialised,
2992 * so we can't use it for any useful checking. growfs ensures we can't
2993 * use it by using uncached buffers that don't have the perag attached
2994 * so we can detect and avoid this problem.
2995 */
2996 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2997 return __this_address;
2998
2999 if (xfs_has_lazysbcount(mp) &&
3000 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
3001 return __this_address;
3002
3003 if (xfs_has_reflink(mp) &&
3004 be32_to_cpu(agf->agf_refcount_blocks) >
3005 be32_to_cpu(agf->agf_length))
3006 return __this_address;
3007
3008 if (xfs_has_reflink(mp) &&
3009 (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3010 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels))
3011 return __this_address;
3012
3013 return NULL;
3014
3015 }
3016
3017 static void
3018 xfs_agf_read_verify(
3019 struct xfs_buf *bp)
3020 {
3021 struct xfs_mount *mp = bp->b_mount;
3022 xfs_failaddr_t fa;
3023
3024 if (xfs_has_crc(mp) &&
3025 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3026 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3027 else {
3028 fa = xfs_agf_verify(bp);
3029 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3030 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3031 }
3032 }
3033
3034 static void
3035 xfs_agf_write_verify(
3036 struct xfs_buf *bp)
3037 {
3038 struct xfs_mount *mp = bp->b_mount;
3039 struct xfs_buf_log_item *bip = bp->b_log_item;
3040 struct xfs_agf *agf = bp->b_addr;
3041 xfs_failaddr_t fa;
3042
3043 fa = xfs_agf_verify(bp);
3044 if (fa) {
3045 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3046 return;
3047 }
3048
3049 if (!xfs_has_crc(mp))
3050 return;
3051
3052 if (bip)
3053 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3054
3055 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3056 }
3057
3058 const struct xfs_buf_ops xfs_agf_buf_ops = {
3059 .name = "xfs_agf",
3060 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3061 .verify_read = xfs_agf_read_verify,
3062 .verify_write = xfs_agf_write_verify,
3063 .verify_struct = xfs_agf_verify,
3064 };
3065
3066 /*
3067 * Read in the allocation group header (free/alloc section).
3068 */
3069 int /* error */
3070 xfs_read_agf(
3071 struct xfs_mount *mp, /* mount point structure */
3072 struct xfs_trans *tp, /* transaction pointer */
3073 xfs_agnumber_t agno, /* allocation group number */
3074 int flags, /* XFS_BUF_ */
3075 struct xfs_buf **bpp) /* buffer for the ag freelist header */
3076 {
3077 int error;
3078
3079 trace_xfs_read_agf(mp, agno);
3080
3081 ASSERT(agno != NULLAGNUMBER);
3082 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3083 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
3084 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
3085 if (error)
3086 return error;
3087
3088 ASSERT(!(*bpp)->b_error);
3089 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
3090 return 0;
3091 }
3092
3093 /*
3094 * Read in the allocation group header (free/alloc section).
3095 */
3096 int /* error */
3097 xfs_alloc_read_agf(
3098 struct xfs_mount *mp, /* mount point structure */
3099 struct xfs_trans *tp, /* transaction pointer */
3100 xfs_agnumber_t agno, /* allocation group number */
3101 int flags, /* XFS_ALLOC_FLAG_... */
3102 struct xfs_buf **bpp) /* buffer for the ag freelist header */
3103 {
3104 struct xfs_agf *agf; /* ag freelist header */
3105 struct xfs_perag *pag; /* per allocation group data */
3106 int error;
3107 int allocbt_blks;
3108
3109 trace_xfs_alloc_read_agf(mp, agno);
3110
3111 /* We don't support trylock when freeing. */
3112 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3113 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3114 ASSERT(agno != NULLAGNUMBER);
3115 error = xfs_read_agf(mp, tp, agno,
3116 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3117 bpp);
3118 if (error)
3119 return error;
3120 ASSERT(!(*bpp)->b_error);
3121
3122 agf = (*bpp)->b_addr;
3123 pag = (*bpp)->b_pag;
3124 if (!pag->pagf_init) {
3125 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3126 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3127 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3128 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3129 pag->pagf_levels[XFS_BTNUM_BNOi] =
3130 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3131 pag->pagf_levels[XFS_BTNUM_CNTi] =
3132 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3133 pag->pagf_levels[XFS_BTNUM_RMAPi] =
3134 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3135 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3136 pag->pagf_init = 1;
3137 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
3138
3139 /*
3140 * Update the in-core allocbt counter. Filter out the rmapbt
3141 * subset of the btreeblks counter because the rmapbt is managed
3142 * by perag reservation. Subtract one for the rmapbt root block
3143 * because the rmap counter includes it while the btreeblks
3144 * counter only tracks non-root blocks.
3145 */
3146 allocbt_blks = pag->pagf_btreeblks;
3147 if (xfs_has_rmapbt(mp))
3148 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3149 if (allocbt_blks > 0)
3150 atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
3151 }
3152 #ifdef DEBUG
3153 else if (!xfs_is_shutdown(mp)) {
3154 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3155 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3156 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3157 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3158 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3159 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3160 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3161 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3162 }
3163 #endif
3164 return 0;
3165 }
3166
3167 /*
3168 * Allocate an extent (variable-size).
3169 * Depending on the allocation type, we either look in a single allocation
3170 * group or loop over the allocation groups to find the result.
3171 */
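/*
 * Illustrative caller sketch (hypothetical values, not a caller in this
 * file): a simple data allocation might zero a struct xfs_alloc_arg and
 * fill in roughly
 *
 *	struct xfs_alloc_arg	args = { .tp = tp, .mp = mp };
 *
 *	args.fsbno = hint_fsb;			// target hint block
 *	args.type = XFS_ALLOCTYPE_START_BNO;	// near the hint, any AG
 *	args.minlen = 1;
 *	args.maxlen = want_len;			// desired extent length
 *	args.total = want_len;
 *	args.prod = args.alignment = 1;
 *	args.resv = XFS_AG_RESV_NONE;
 *	args.oinfo = owner;			// rmap owner of the blocks
 *
 * before calling xfs_alloc_vextent(&args).  On return args.fsbno is
 * NULLFSBLOCK if nothing could be allocated; otherwise it names the first
 * block of an extent args.len blocks long.
 */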
3172 int /* error */
3173 xfs_alloc_vextent(
3174 struct xfs_alloc_arg *args) /* allocation argument structure */
3175 {
3176 xfs_agblock_t agsize; /* allocation group size */
3177 int error;
3178 int flags; /* XFS_ALLOC_FLAG_... locking flags */
3179 struct xfs_mount *mp; /* mount structure pointer */
3180 xfs_agnumber_t sagno; /* starting allocation group number */
3181 xfs_alloctype_t type; /* input allocation type */
3182 int bump_rotor = 0;
3183 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
3184
3185 mp = args->mp;
3186 type = args->otype = args->type;
3187 args->agbno = NULLAGBLOCK;
3188 /*
3189 * Just fix this up, for the case where the last a.g. is shorter
3190 * (or there's only one a.g.) and the caller couldn't easily figure
3191 * that out (xfs_bmap_alloc).
3192 */
3193 agsize = mp->m_sb.sb_agblocks;
3194 if (args->maxlen > agsize)
3195 args->maxlen = agsize;
3196 if (args->alignment == 0)
3197 args->alignment = 1;
3198 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
3199 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
3200 ASSERT(args->minlen <= args->maxlen);
3201 ASSERT(args->minlen <= agsize);
3202 ASSERT(args->mod < args->prod);
3203 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
3204 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
3205 args->minlen > args->maxlen || args->minlen > agsize ||
3206 args->mod >= args->prod) {
3207 args->fsbno = NULLFSBLOCK;
3208 trace_xfs_alloc_vextent_badargs(args);
3209 return 0;
3210 }
3211
3212 switch (type) {
3213 case XFS_ALLOCTYPE_THIS_AG:
3214 case XFS_ALLOCTYPE_NEAR_BNO:
3215 case XFS_ALLOCTYPE_THIS_BNO:
3216 /*
3217 * These three force us into a single a.g.
3218 */
3219 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3220 args->pag = xfs_perag_get(mp, args->agno);
3221 error = xfs_alloc_fix_freelist(args, 0);
3222 if (error) {
3223 trace_xfs_alloc_vextent_nofix(args);
3224 goto error0;
3225 }
3226 if (!args->agbp) {
3227 trace_xfs_alloc_vextent_noagbp(args);
3228 break;
3229 }
3230 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3231 if ((error = xfs_alloc_ag_vextent(args)))
3232 goto error0;
3233 break;
3234 case XFS_ALLOCTYPE_START_BNO:
3235 /*
3236 * Try near allocation first, then anywhere-in-ag after
3237 * the first a.g. fails.
3238 */
3239 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3240 xfs_is_inode32(mp)) {
3241 args->fsbno = XFS_AGB_TO_FSB(mp,
3242 ((mp->m_agfrotor / rotorstep) %
3243 mp->m_sb.sb_agcount), 0);
3244 bump_rotor = 1;
3245 }
3246 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3247 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3248 fallthrough;
3249 case XFS_ALLOCTYPE_FIRST_AG:
3250 /*
3251 * Rotate through the allocation groups looking for a winner.
3252 */
3253 if (type == XFS_ALLOCTYPE_FIRST_AG) {
3254 /*
3255 * Start with allocation group given by bno.
3256 */
3257 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3258 args->type = XFS_ALLOCTYPE_THIS_AG;
3259 sagno = 0;
3260 flags = 0;
3261 } else {
3262 /*
3263 * Start with the given allocation group.
3264 */
3265 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3266 flags = XFS_ALLOC_FLAG_TRYLOCK;
3267 }
3268 /*
3269 * Loop over allocation groups twice; first time with
3270 * trylock set, second time without.
3271 */
3272 for (;;) {
3273 args->pag = xfs_perag_get(mp, args->agno);
3274 error = xfs_alloc_fix_freelist(args, flags);
3275 if (error) {
3276 trace_xfs_alloc_vextent_nofix(args);
3277 goto error0;
3278 }
3279 /*
3280 * If we get a buffer back then the allocation will fly.
3281 */
3282 if (args->agbp) {
3283 if ((error = xfs_alloc_ag_vextent(args)))
3284 goto error0;
3285 break;
3286 }
3287
3288 trace_xfs_alloc_vextent_loopfailed(args);
3289
3290 /*
3291 * Didn't work, figure out the next iteration.
3292 */
3293 if (args->agno == sagno &&
3294 type == XFS_ALLOCTYPE_START_BNO)
3295 args->type = XFS_ALLOCTYPE_THIS_AG;
3296 /*
3297 * For the first allocation, we can try any AG to get
3298 * space. However, if we already have allocated a
3299 * block, we don't want to try AGs whose number is below
3300 * sagno. Otherwise, we may end up with out-of-order
3301 * locking of AGF, which might cause deadlock.
3302 */
3303 if (++(args->agno) == mp->m_sb.sb_agcount) {
3304 if (args->tp->t_firstblock != NULLFSBLOCK)
3305 args->agno = sagno;
3306 else
3307 args->agno = 0;
3308 }
3309 /*
3310 * Reached the starting a.g., must either be done
3311 * or switch to non-trylock mode.
3312 */
3313 if (args->agno == sagno) {
3314 if (flags == 0) {
3315 args->agbno = NULLAGBLOCK;
3316 trace_xfs_alloc_vextent_allfailed(args);
3317 break;
3318 }
3319
3320 flags = 0;
3321 if (type == XFS_ALLOCTYPE_START_BNO) {
3322 args->agbno = XFS_FSB_TO_AGBNO(mp,
3323 args->fsbno);
3324 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3325 }
3326 }
3327 xfs_perag_put(args->pag);
3328 }
3329 if (bump_rotor) {
3330 if (args->agno == sagno)
3331 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3332 (mp->m_sb.sb_agcount * rotorstep);
3333 else
3334 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3335 (mp->m_sb.sb_agcount * rotorstep);
3336 }
3337 break;
3338 default:
3339 ASSERT(0);
3340 /* NOTREACHED */
3341 }
3342 if (args->agbno == NULLAGBLOCK)
3343 args->fsbno = NULLFSBLOCK;
3344 else {
3345 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3346 #ifdef DEBUG
3347 ASSERT(args->len >= args->minlen);
3348 ASSERT(args->len <= args->maxlen);
3349 ASSERT(args->agbno % args->alignment == 0);
3350 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
3351 args->len);
3352 #endif
3353
3354 }
3355 xfs_perag_put(args->pag);
3356 return 0;
3357 error0:
3358 xfs_perag_put(args->pag);
3359 return error;
3360 }
3361
3362 /* Ensure that the freelist is at full capacity. */
3363 int
3364 xfs_free_extent_fix_freelist(
3365 struct xfs_trans *tp,
3366 struct xfs_perag *pag,
3367 struct xfs_buf **agbp)
3368 {
3369 struct xfs_alloc_arg args;
3370 int error;
3371
3372 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3373 args.tp = tp;
3374 args.mp = tp->t_mountp;
3375 args.agno = pag->pag_agno;
3376 args.pag = pag;
3377
3378 /*
3379 * validate that the block number is legal - this enables us to detect
3380 * and handle a silent filesystem corruption rather than crashing.
3381 */
3382 if (args.agno >= args.mp->m_sb.sb_agcount)
3383 return -EFSCORRUPTED;
3384
3385 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3386 if (error)
3387 return error;
3388
3389 *agbp = args.agbp;
3390 return 0;
3391 }
3392
3393 /*
3394 * Free an extent.
3395 * Just break up the extent address and hand off to xfs_free_ag_extent
3396 * after fixing up the freelist.
3397 */
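/*
 * Illustrative example (hypothetical geometry): filesystem block numbers
 * encode the AG number in their high bits, so with sb_agblklog = 16,
 * fsbno 0x30005 splits into agno = 0x30005 >> 16 = 3 and
 * agbno = 0x30005 & 0xffff = 5, which is what XFS_FSB_TO_AGNO() and
 * XFS_FSB_TO_AGBNO() compute below.
 */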
3398 int
3399 __xfs_free_extent(
3400 struct xfs_trans *tp,
3401 xfs_fsblock_t bno,
3402 xfs_extlen_t len,
3403 const struct xfs_owner_info *oinfo,
3404 enum xfs_ag_resv_type type,
3405 bool skip_discard)
3406 {
3407 struct xfs_mount *mp = tp->t_mountp;
3408 struct xfs_buf *agbp;
3409 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
3410 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
3411 struct xfs_agf *agf;
3412 int error;
3413 unsigned int busy_flags = 0;
3414 struct xfs_perag *pag;
3415
3416 ASSERT(len != 0);
3417 ASSERT(type != XFS_AG_RESV_AGFL);
3418
3419 if (XFS_TEST_ERROR(false, mp,
3420 XFS_ERRTAG_FREE_EXTENT))
3421 return -EIO;
3422
3423 pag = xfs_perag_get(mp, agno);
3424 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3425 if (error)
3426 goto err;
3427 agf = agbp->b_addr;
3428
3429 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3430 error = -EFSCORRUPTED;
3431 goto err_release;
3432 }
3433
3434 /* validate the extent size is legal now we have the agf locked */
3435 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3436 error = -EFSCORRUPTED;
3437 goto err_release;
3438 }
3439
3440 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3441 if (error)
3442 goto err_release;
3443
3444 if (skip_discard)
3445 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3446 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3447 xfs_perag_put(pag);
3448 return 0;
3449
3450 err_release:
3451 xfs_trans_brelse(tp, agbp);
3452 err:
3453 xfs_perag_put(pag);
3454 return error;
3455 }
3456
3457 struct xfs_alloc_query_range_info {
3458 xfs_alloc_query_range_fn fn;
3459 void *priv;
3460 };
3461
3462 /* Format btree record and pass to our callback. */
3463 STATIC int
3464 xfs_alloc_query_range_helper(
3465 struct xfs_btree_cur *cur,
3466 const union xfs_btree_rec *rec,
3467 void *priv)
3468 {
3469 struct xfs_alloc_query_range_info *query = priv;
3470 struct xfs_alloc_rec_incore irec;
3471
3472 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3473 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3474 return query->fn(cur, &irec, query->priv);
3475 }
3476
3477 /* Find all free space within a given range of blocks. */
3478 int
3479 xfs_alloc_query_range(
3480 struct xfs_btree_cur *cur,
3481 const struct xfs_alloc_rec_incore *low_rec,
3482 const struct xfs_alloc_rec_incore *high_rec,
3483 xfs_alloc_query_range_fn fn,
3484 void *priv)
3485 {
3486 union xfs_btree_irec low_brec;
3487 union xfs_btree_irec high_brec;
3488 struct xfs_alloc_query_range_info query;
3489
3490 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3491 low_brec.a = *low_rec;
3492 high_brec.a = *high_rec;
3493 query.priv = priv;
3494 query.fn = fn;
3495 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3496 xfs_alloc_query_range_helper, &query);
3497 }
3498
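/*
 * Illustrative callback sketch (hypothetical helper, not defined in this
 * file): a caller counting the free extents in a region might pass
 *
 *	static int
 *	count_free_helper(
 *		struct xfs_btree_cur			*cur,
 *		const struct xfs_alloc_rec_incore	*rec,
 *		void					*priv)
 *	{
 *		unsigned int	*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 * to xfs_alloc_query_range() together with low/high records whose
 * ar_startblock values bound the region of interest.
 */
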
3499 /* Find all free space records. */
3500 int
3501 xfs_alloc_query_all(
3502 struct xfs_btree_cur *cur,
3503 xfs_alloc_query_range_fn fn,
3504 void *priv)
3505 {
3506 struct xfs_alloc_query_range_info query;
3507
3508 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3509 query.priv = priv;
3510 query.fn = fn;
3511 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3512 }
3513
3514 /* Is there a record covering a given extent? */
3515 int
3516 xfs_alloc_has_record(
3517 struct xfs_btree_cur *cur,
3518 xfs_agblock_t bno,
3519 xfs_extlen_t len,
3520 bool *exists)
3521 {
3522 union xfs_btree_irec low;
3523 union xfs_btree_irec high;
3524
3525 memset(&low, 0, sizeof(low));
3526 low.a.ar_startblock = bno;
3527 memset(&high, 0xFF, sizeof(high));
3528 high.a.ar_startblock = bno + len - 1;
3529
3530 return xfs_btree_has_record(cur, &low, &high, exists);
3531 }
3532
3533 /*
3534 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
3535 * error code or XFS_ITER_*.
3536 */
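/*
 * Illustrative walk_fn sketch (hypothetical helper, not defined in this
 * file): a walker that aborts on the first block above a threshold could
 * look like
 *
 *	static int
 *	agfl_high_block_check(
 *		struct xfs_mount	*mp,
 *		xfs_agblock_t		agbno,
 *		void			*priv)
 *	{
 *		if (agbno > *(xfs_agblock_t *)priv)
 *			return -ECANCELED;
 *		return 0;
 *	}
 *
 * Any non-zero return stops the walk and is handed back to the caller of
 * xfs_agfl_walk().
 */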
3537 int
3538 xfs_agfl_walk(
3539 struct xfs_mount *mp,
3540 struct xfs_agf *agf,
3541 struct xfs_buf *agflbp,
3542 xfs_agfl_walk_fn walk_fn,
3543 void *priv)
3544 {
3545 __be32 *agfl_bno;
3546 unsigned int i;
3547 int error;
3548
3549 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3550 i = be32_to_cpu(agf->agf_flfirst);
3551
3552 /* Nothing to walk in an empty AGFL. */
3553 if (agf->agf_flcount == cpu_to_be32(0))
3554 return 0;
3555
3556 /* Otherwise, walk from first to last, wrapping as needed. */
3557 for (;;) {
3558 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3559 if (error)
3560 return error;
3561 if (i == be32_to_cpu(agf->agf_fllast))
3562 break;
3563 if (++i == xfs_agfl_size(mp))
3564 i = 0;
3565 }
3566
3567 return 0;
3568 }
3569
3570 int __init
3571 xfs_extfree_intent_init_cache(void)
3572 {
3573 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
3574 sizeof(struct xfs_extent_free_item),
3575 0, 0, NULL);
3576
3577 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
3578 }
3579
3580 void
3581 xfs_extfree_intent_destroy_cache(void)
3582 {
3583 kmem_cache_destroy(xfs_extfree_item_cache);
3584 xfs_extfree_item_cache = NULL;
3585 }