/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag_resv.h"

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots
 * at the beginning of the block for a proper header with the location
 * information and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
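
/*
 * Worked example (a sketch, assuming the usual 36 byte struct xfs_agfl
 * header of magic, seqno, uuid, lsn and crc): with 512 byte sectors a
 * CRC-enabled filesystem gets (512 - 36) / sizeof(xfs_agblock_t) = 119
 * AGFL slots, while a non-CRC filesystem keeps the whole sector for
 * 512 / 4 = 128 slots.
 */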

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}
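
/*
 * Both helpers above walk the fixed mkfs-time AG layout: each optional
 * feature btree (finobt, rmapbt, refcountbt) claims one more statically
 * placed root block, so xfs_prealloc_blocks() is simply one past the
 * highest root block present on this filesystem.
 */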

/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks.  However, these restrictions may result in no actual
 * space allocated for a delayed extent - for example, a data block in a
 * certain AG is allocated but there is no additional block available for the
 * bmap btree block needed when the file's bmap btree splits.  This can lead
 * to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated.  To get around this, we explicitly
 * set aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
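
/*
 * For example (a sketch, using the reservation described above of 4
 * freelist blocks plus 4 bmap btree split blocks per AG): a filesystem
 * with 16 AGs sets aside 16 * 8 = 128 blocks that delayed allocation
 * must never consume.
 */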

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG.  However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata.  These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode, rmap and refcount btree root blocks
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry.  The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4));	/* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
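
/*
 * Illustrative numbers (a sketch, assuming 512 byte sectors and 4k
 * blocks): the four sector-sized headers round up to a single block, so
 * with finobt, rmapbt and reflink all enabled the deduction per AG is
 * 1 + XFS_ALLOC_AGFL_RESERVE + 3 + 1 + 1 + 1 blocks.
 */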

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}
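
/*
 * A minimal sketch of how the lookup/get_rec helpers above pair up,
 * following the idiom used throughout the rest of this file:
 *
 *	error = xfs_alloc_lookup_ge(cur, bno, len, &i);
 *	if (error)
 *		return error;
 *	if (i) {
 *		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
 *		...
 *	}
 *
 * i is the found/not-found indicator; only when it is set does the
 * cursor point at a record that get_rec may read back.
 */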

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
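
/*
 * Example (a sketch): with alignment 4, a trimmed free extent starting
 * at bno 10 for 30 blocks rounds up to aligned_bno 12, giving a result
 * of [12, 28]; had the round-up consumed the whole extent, the result
 * length would have been clamped to zero.
 */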

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1 = 0;	/* length with newbno1 */
	xfs_extlen_t	newlen2 = 0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before the desired block.  The second case is there to
	 * allow for contiguous allocation from the remaining free space if
	 * the file grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
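
/*
 * Example (a sketch): wanting [100, 8] from free space [90, 40] with
 * alignment 4 takes the freeend >= wantend branch; newbno1 is
 * roundup(100, 4) = 100 with diff 0, and the newbno2 = 96 candidate
 * loses because it is no longer and sits farther from wantbno.
 */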

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
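
/*
 * Example (a sketch): with prod = 4, mod = 1 and len = 11, k = 11 % 4 = 3
 * exceeds mod, so the length is trimmed by k - mod = 2 to 9, satisfying
 * 9 % 4 == 1; if that trim fell below minlen the length would be left
 * unchanged instead.
 */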

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1 = 0;	/* first new free length */
	xfs_extlen_t	nflen2 = 0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}
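
/*
 * Example of the middle-cut case (a sketch): allocating [110, 20] out of
 * the free extent [100, 50] leaves [100, 10] and [130, 20] free, so one
 * by-size record is deleted and two inserted, while the by-block record
 * is updated in place and a second one inserted after it.
 */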

static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of
	 * the AGFL is what the AGF says is active. We can't get to the AGF,
	 * so we can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return __this_address;
	/*
	 * During growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we
	 * can't use it by using uncached buffers that don't have the perag
	 * attached so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of
	 * the AGFL is what the AGF says is active. We can't get to the AGF,
	 * so we can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int					/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error = 0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}
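
/*
 * A minimal caller sketch (hypothetical values; the real callers fill in
 * considerably more of xfs_alloc_arg_t than shown here):
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.agbp = agbp;
 *	args.pag = pag;
 *	args.agno = agno;
 *	args.agbno = target_agbno;
 *	args.minlen = 1;
 *	args.maxlen = wanted_len;
 *	args.prod = 1;
 *	args.alignment = 1;
 *	args.type = XFS_ALLOCTYPE_NEAR_BNO;
 *	error = xfs_alloc_ag_vextent(&args);
 *
 * mod defaults to 0 and so passes the mod < prod assertion above.
 */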

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;
	unsigned		busy_gen;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the
	 * end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen,
				sbnoa, slena, &busy_gen);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;
	busy = false;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti = 0;
		xfs_extlen_t	blen = 0;
		xfs_agblock_t	bnew = 0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;

		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
					&gtbnoa, &gtlena, &busy_gen);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	busy = false;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case
	 * that there are no large extents, this will return the last entry
	 * in the tree unless the tree is empty. In the case that there are
	 * only busy large extents, this will return the largest small extent
	 * unless there are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_owner_info	oinfo;
	struct xfs_perag	*pag;
	int			error;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 &&
		 args->resv != XFS_AG_RESV_AGFL &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
			      xfs_alloc_allow_busy_reuse(args->datatype));

			if (xfs_alloc_is_userdata(args->datatype)) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				if (!bp) {
					error = -EFSCORRUPTED;
					goto error0;
				}
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(args->mp,
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);

			/*
			 * If we're feeding an AGFL block to something that
			 * doesn't live in the free space, we need to clear
			 * out the OWN_AG rmap and add the block back to
			 * the AGFL per-AG reservation.
			 */
			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
					fbno, 1, &oinfo);
			if (error)
				goto error0;
			pag = xfs_perag_get(args->mp, args->agno);
			xfs_ag_resv_free_extent(pag, XFS_AG_RESV_AGFL,
						args->tp, 1);
			xfs_perag_put(pag);

			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
1679
1680 /*
1681 * Free the extent starting at agno/bno for length.
1682 */
1683 STATIC int
1684 xfs_free_ag_extent(
1685 xfs_trans_t *tp,
1686 xfs_buf_t *agbp,
1687 xfs_agnumber_t agno,
1688 xfs_agblock_t bno,
1689 xfs_extlen_t len,
1690 struct xfs_owner_info *oinfo,
1691 enum xfs_ag_resv_type type)
1692 {
1693 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1694 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1695 int error; /* error return value */
1696 xfs_agblock_t gtbno; /* start of right neighbor block */
1697 xfs_extlen_t gtlen; /* length of right neighbor block */
1698 int haveleft; /* have a left neighbor block */
1699 int haveright; /* have a right neighbor block */
1700 int i; /* temp, result code */
1701 xfs_agblock_t ltbno; /* start of left neighbor block */
1702 xfs_extlen_t ltlen; /* length of left neighbor block */
1703 xfs_mount_t *mp; /* mount point struct for filesystem */
1704 xfs_agblock_t nbno; /* new starting block of freespace */
1705 xfs_extlen_t nlen; /* new length of freespace */
1706 xfs_perag_t *pag; /* per allocation group data */
1707
1708 bno_cur = cnt_cur = NULL;
1709 mp = tp->t_mountp;
1710
1711 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1712 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1713 if (error)
1714 goto error0;
1715 }
1716
1717 /*
1718 * Allocate and initialize a cursor for the by-block btree.
1719 */
1720 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1721 /*
1722 * Look for a neighboring block on the left (lower block numbers)
1723 * that is contiguous with this space.
1724 */
1725 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1726 goto error0;
1727 if (haveleft) {
1728 /*
1729 * There is a block to our left.
1730 */
1731 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1732 goto error0;
1733 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1734 /*
1735 * It's not contiguous, though.
1736 */
1737 if (ltbno + ltlen < bno)
1738 haveleft = 0;
1739 else {
1740 /*
1741 * If this failure happens the request to free this
1742 * space was invalid, it's (partly) already free.
1743 * Very bad.
1744 */
1745 XFS_WANT_CORRUPTED_GOTO(mp,
1746 ltbno + ltlen <= bno, error0);
1747 }
1748 }
1749 /*
1750 * Look for a neighboring block on the right (higher block numbers)
1751 * that is contiguous with this space.
1752 */
1753 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1754 goto error0;
1755 if (haveright) {
1756 /*
1757 * There is a block to our right.
1758 */
1759 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1760 goto error0;
1761 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1762 /*
1763 * It's not contiguous, though.
1764 */
1765 if (bno + len < gtbno)
1766 haveright = 0;
1767 else {
1768 /*
1769 * If this failure happens the request to free this
1770 * space was invalid, it's (partly) already free.
1771 * Very bad.
1772 */
1773 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1774 }
1775 }
1776 /*
1777 * Now allocate and initialize a cursor for the by-size tree.
1778 */
1779 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1780 /*
1781 * Have both left and right contiguous neighbors.
1782 * Merge all three into a single free block.
1783 */
1784 if (haveleft && haveright) {
1785 /*
1786 * Delete the old by-size entry on the left.
1787 */
1788 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1789 goto error0;
1790 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1791 if ((error = xfs_btree_delete(cnt_cur, &i)))
1792 goto error0;
1793 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1794 /*
1795 * Delete the old by-size entry on the right.
1796 */
1797 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1798 goto error0;
1799 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1800 if ((error = xfs_btree_delete(cnt_cur, &i)))
1801 goto error0;
1802 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1803 /*
1804 * Delete the old by-block entry for the right block.
1805 */
1806 if ((error = xfs_btree_delete(bno_cur, &i)))
1807 goto error0;
1808 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1809 /*
1810 * Move the by-block cursor back to the left neighbor.
1811 */
1812 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1813 goto error0;
1814 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1815 #ifdef DEBUG
1816 /*
1817 * Check that this is the right record: delete didn't
1818 * mangle the cursor.
1819 */
1820 {
1821 xfs_agblock_t xxbno;
1822 xfs_extlen_t xxlen;
1823
1824 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1825 &i)))
1826 goto error0;
1827 XFS_WANT_CORRUPTED_GOTO(mp,
1828 i == 1 && xxbno == ltbno && xxlen == ltlen,
1829 error0);
1830 }
1831 #endif
1832 /*
1833 * Update remaining by-block entry to the new, joined block.
1834 */
1835 nbno = ltbno;
1836 nlen = len + ltlen + gtlen;
1837 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1838 goto error0;
1839 }
1840 /*
1841 * Have only a left contiguous neighbor.
1842 * Merge it together with the new freespace.
1843 */
1844 else if (haveleft) {
1845 /*
1846 * Delete the old by-size entry on the left.
1847 */
1848 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1849 goto error0;
1850 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1851 if ((error = xfs_btree_delete(cnt_cur, &i)))
1852 goto error0;
1853 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1854 /*
1855 * Back up the by-block cursor to the left neighbor, and
1856 * update its length.
1857 */
1858 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1859 goto error0;
1860 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1861 nbno = ltbno;
1862 nlen = len + ltlen;
1863 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1864 goto error0;
1865 }
1866 /*
1867 * Have only a right contiguous neighbor.
1868 * Merge it together with the new freespace.
1869 */
1870 else if (haveright) {
1871 /*
1872 * Delete the old by-size entry on the right.
1873 */
1874 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1875 goto error0;
1876 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1877 if ((error = xfs_btree_delete(cnt_cur, &i)))
1878 goto error0;
1879 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1880 /*
1881 * Update the starting block and length of the right
1882 * neighbor in the by-block tree.
1883 */
1884 nbno = bno;
1885 nlen = len + gtlen;
1886 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1887 goto error0;
1888 }
1889 /*
1890 * No contiguous neighbors.
1891 * Insert the new freespace into the by-block tree.
1892 */
1893 else {
1894 nbno = bno;
1895 nlen = len;
1896 if ((error = xfs_btree_insert(bno_cur, &i)))
1897 goto error0;
1898 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1899 }
1900 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1901 bno_cur = NULL;
1902 /*
1903 * In all cases we need to insert the new freespace in the by-size tree.
1904 */
1905 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1906 goto error0;
1907 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1908 if ((error = xfs_btree_insert(cnt_cur, &i)))
1909 goto error0;
1910 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1911 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1912 cnt_cur = NULL;
1913
1914 /*
1915 * Update the freespace totals in the ag and superblock.
1916 */
1917 pag = xfs_perag_get(mp, agno);
1918 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1919 xfs_ag_resv_free_extent(pag, type, tp, len);
1920 xfs_perag_put(pag);
1921 if (error)
1922 goto error0;
1923
1924 XFS_STATS_INC(mp, xs_freex);
1925 XFS_STATS_ADD(mp, xs_freeb, len);
1926
1927 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1928 haveleft, haveright);
1929
1930 return 0;
1931
1932 error0:
1933 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1934 -1, -1);
1935 if (bno_cur)
1936 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1937 if (cnt_cur)
1938 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1939 return error;
1940 }
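
/*
 * Worked example of the coalescing above, with made-up numbers: free
 * [bno = 100, len = 10] while the bnobt holds a left neighbor
 * [ltbno = 90, ltlen = 10] and a right neighbor [gtbno = 110, gtlen = 5].
 * Both neighbors touch the freed range, so all three records collapse
 * into one:
 *
 *	nbno = ltbno               = 90
 *	nlen = ltlen + len + gtlen = 10 + 10 + 5 = 25
 *
 * Both old by-size records are deleted, the right by-block record is
 * deleted, the left by-block record is rewritten as [90, 25], and a
 * fresh [90, 25] record is inserted into the cntbt.  With only one
 * contiguous neighbor, the same arithmetic applies to that side alone.
 */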
1941
1942 /*
1943 * Visible (exported) allocation/free functions.
1944 * Some of these are used just by xfs_alloc_btree.c and this file.
1945 */
1946
1947 /*
1948 * Compute and fill in value of m_ag_maxlevels.
1949 */
1950 void
1951 xfs_alloc_compute_maxlevels(
1952 xfs_mount_t *mp) /* file system mount structure */
1953 {
1954 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
1955 (mp->m_sb.sb_agblocks + 1) / 2);
1956 }
1957
1958 /*
1959 * Find the length of the longest extent in an AG. The 'need' parameter
1960 * specifies how much space we're going to need for the AGFL and the
1961 * 'reserved' parameter tells us how many blocks in this AG are reserved for
1962 * other callers.
1963 */
1964 xfs_extlen_t
1965 xfs_alloc_longest_free_extent(
1966 struct xfs_mount *mp,
1967 struct xfs_perag *pag,
1968 xfs_extlen_t need,
1969 xfs_extlen_t reserved)
1970 {
1971 xfs_extlen_t delta = 0;
1972
1973 /*
1974 * If the AGFL needs a recharge, we'll have to subtract that from the
1975 * longest extent.
1976 */
1977 if (need > pag->pagf_flcount)
1978 delta = need - pag->pagf_flcount;
1979
1980 /*
1981 * If we cannot maintain others' reservations with space from the
1982 * not-longest freesp extents, we'll have to subtract /that/ from
1983 * the longest extent too.
1984 */
1985 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1986 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1987
1988 /*
1989 * If the longest extent is long enough to satisfy all the
1990 * reservations and AGFL rules in place, we can return this extent.
1991 */
1992 if (pag->pagf_longest > delta)
1993 return pag->pagf_longest - delta;
1994
1995 /* Otherwise, let the caller try for 1 block if there's space. */
1996 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1997 }
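
/*
 * Worked example with made-up per-AG numbers: need = 8, pagf_flcount = 6,
 * reserved = 20, pagf_freeblks = 100, pagf_longest = 90.  The AGFL is two
 * blocks short, so delta = 8 - 6 = 2.  The non-longest free space covers
 * only 100 - 90 = 10 of the 20 reserved blocks, so delta += 20 - 10,
 * giving delta = 12.  The usable longest extent is 90 - 12 = 78 blocks.
 */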
1998
1999 unsigned int
2000 xfs_alloc_min_freelist(
2001 struct xfs_mount *mp,
2002 struct xfs_perag *pag)
2003 {
2004 unsigned int min_free;
2005
2006 /* space needed by-bno freespace btree */
2007 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2008 mp->m_ag_maxlevels);
2009 /* space needed by-size freespace btree */
2010 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2011 mp->m_ag_maxlevels);
2012 /* space needed reverse mapping used space btree */
2013 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2014 min_free += min_t(unsigned int,
2015 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2016 mp->m_rmap_maxlevels);
2017
2018 return min_free;
2019 }
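
/*
 * For example, if the bnobt and cntbt are both 2 levels high and, on an
 * rmapbt filesystem, the rmapbt is 3 levels high, the worst-case demand
 * is (2 + 1) + (2 + 1) + (3 + 1) = 10 blocks (assuming each height stays
 * below the per-AG maximum): enough for every level of each btree to
 * split and require a new block.
 */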
2020
2021 /*
2022 * Check if the operation we are fixing up the freelist for should go ahead or
2023 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2024 * is dependent on whether the size and shape of free space available will
2025 * permit the requested allocation to take place.
2026 */
2027 static bool
2028 xfs_alloc_space_available(
2029 struct xfs_alloc_arg *args,
2030 xfs_extlen_t min_free,
2031 int flags)
2032 {
2033 struct xfs_perag *pag = args->pag;
2034 xfs_extlen_t alloc_len, longest;
2035 xfs_extlen_t reservation; /* blocks that are still reserved */
2036 int available;
2037
2038 if (flags & XFS_ALLOC_FLAG_FREEING)
2039 return true;
2040
2041 reservation = xfs_ag_resv_needed(pag, args->resv);
2042
2043 /* do we have enough contiguous free space for the allocation? */
2044 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2045 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
2046 reservation);
2047 if (longest < alloc_len)
2048 return false;
2049
2050 /* do we have enough free space remaining for the allocation? */
2051 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2052 reservation - min_free - args->minleft);
2053 if (available < (int)max(args->total, alloc_len))
2054 return false;
2055
2056 /*
2057 * Clamp maxlen to the amount of free space available for the actual
2058 * extent allocation.
2059 */
2060 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2061 args->maxlen = available;
2062 ASSERT(args->maxlen > 0);
2063 ASSERT(args->maxlen >= args->minlen);
2064 }
2065
2066 return true;
2067 }
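
/*
 * The two checks with hypothetical numbers: minlen = 8, alignment = 4 and
 * minalignslop = 0 give alloc_len = 8 + 3 = 11, so the longest free
 * extent must span at least 11 blocks to leave room for alignment.
 * Separately, with pagf_freeblks = 50, pagf_flcount = 4, reservation = 10,
 * min_free = 6 and minleft = 0, available = 50 + 4 - 10 - 6 - 0 = 38,
 * which must cover max(total, alloc_len); a maxlen above 38 would be
 * clamped down to 38 here.
 */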
2068
2069 /*
2070 * Decide whether to use this allocation group for this allocation.
2071 * If so, fix up the btree freelist's size.
2072 */
2073 int /* error */
2074 xfs_alloc_fix_freelist(
2075 struct xfs_alloc_arg *args, /* allocation argument structure */
2076 int flags) /* XFS_ALLOC_FLAG_... */
2077 {
2078 struct xfs_mount *mp = args->mp;
2079 struct xfs_perag *pag = args->pag;
2080 struct xfs_trans *tp = args->tp;
2081 struct xfs_buf *agbp = NULL;
2082 struct xfs_buf *agflbp = NULL;
2083 struct xfs_alloc_arg targs; /* local allocation arguments */
2084 xfs_agblock_t bno; /* freelist block */
2085 xfs_extlen_t need; /* total blocks needed in freelist */
2086 int error = 0;
2087
2088 if (!pag->pagf_init) {
2089 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2090 if (error)
2091 goto out_no_agbp;
2092 if (!pag->pagf_init) {
2093 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2094 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2095 goto out_agbp_relse;
2096 }
2097 }
2098
2099 /*
2100 	 * If this is a metadata-preferred pag and we are allocating user
2101 	 * data, then try somewhere else, unless we are being asked to try
2102 	 * harder at this point.
2103 */
2104 if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2105 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2106 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2107 goto out_agbp_relse;
2108 }
2109
2110 need = xfs_alloc_min_freelist(mp, pag);
2111 if (!xfs_alloc_space_available(args, need, flags |
2112 XFS_ALLOC_FLAG_CHECK))
2113 goto out_agbp_relse;
2114
2115 /*
2116 * Get the a.g. freespace buffer.
2117 * Can fail if we're not blocking on locks, and it's held.
2118 */
2119 if (!agbp) {
2120 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2121 if (error)
2122 goto out_no_agbp;
2123 if (!agbp) {
2124 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2125 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2126 goto out_no_agbp;
2127 }
2128 }
2129
2130 	/* If there isn't enough total space or a single large enough extent, reject it. */
2131 need = xfs_alloc_min_freelist(mp, pag);
2132 if (!xfs_alloc_space_available(args, need, flags))
2133 goto out_agbp_relse;
2134
2135 /*
2136 * Make the freelist shorter if it's too long.
2137 *
2138 * Note that from this point onwards, we will always release the agf and
2139 * agfl buffers on error. This handles the case where we error out and
2140 * the buffers are clean or may not have been joined to the transaction
2141 * and hence need to be released manually. If they have been joined to
2142 * the transaction, then xfs_trans_brelse() will handle them
2143 * appropriately based on the recursion count and dirty state of the
2144 * buffer.
2145 *
2146 * XXX (dgc): When we have lots of free space, does this buy us
2147 * anything other than extra overhead when we need to put more blocks
2148 * back on the free list? Maybe we should only do this when space is
2149 * getting low or the AGFL is more than half full?
2150 *
2151 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2152 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2153 * updating the rmapbt. Both flags are used in xfs_repair while we're
2154 * rebuilding the rmapbt, and neither are used by the kernel. They're
2155 * both required to ensure that rmaps are correctly recorded for the
2156 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2157 * repair/rmap.c in xfsprogs for details.
2158 */
2159 memset(&targs, 0, sizeof(targs));
2160 if (flags & XFS_ALLOC_FLAG_NORMAP)
2161 xfs_rmap_skip_owner_update(&targs.oinfo);
2162 else
2163 xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2164 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2165 struct xfs_buf *bp;
2166
2167 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2168 if (error)
2169 goto out_agbp_relse;
2170 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
2171 &targs.oinfo, XFS_AG_RESV_AGFL);
2172 if (error)
2173 goto out_agbp_relse;
2174 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2175 if (!bp) {
2176 error = -EFSCORRUPTED;
2177 goto out_agbp_relse;
2178 }
2179 xfs_trans_binval(tp, bp);
2180 }
2181
2182 targs.tp = tp;
2183 targs.mp = mp;
2184 targs.agbp = agbp;
2185 targs.agno = args->agno;
2186 targs.alignment = targs.minlen = targs.prod = 1;
2187 targs.type = XFS_ALLOCTYPE_THIS_AG;
2188 targs.pag = pag;
2189 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2190 if (error)
2191 goto out_agbp_relse;
2192
2193 /* Make the freelist longer if it's too short. */
2194 while (pag->pagf_flcount < need) {
2195 targs.agbno = 0;
2196 targs.maxlen = need - pag->pagf_flcount;
2197 targs.resv = XFS_AG_RESV_AGFL;
2198
2199 /* Allocate as many blocks as possible at once. */
2200 error = xfs_alloc_ag_vextent(&targs);
2201 if (error)
2202 goto out_agflbp_relse;
2203
2204 /*
2205 * Stop if we run out. Won't happen if callers are obeying
2206 * the restrictions correctly. Can happen for free calls
2207 * on a completely full ag.
2208 */
2209 if (targs.agbno == NULLAGBLOCK) {
2210 if (flags & XFS_ALLOC_FLAG_FREEING)
2211 break;
2212 goto out_agflbp_relse;
2213 }
2214 /*
2215 * Put each allocated block on the list.
2216 */
2217 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2218 error = xfs_alloc_put_freelist(tp, agbp,
2219 agflbp, bno, 0);
2220 if (error)
2221 goto out_agflbp_relse;
2222 }
2223 }
2224 xfs_trans_brelse(tp, agflbp);
2225 args->agbp = agbp;
2226 return 0;
2227
2228 out_agflbp_relse:
2229 xfs_trans_brelse(tp, agflbp);
2230 out_agbp_relse:
2231 if (agbp)
2232 xfs_trans_brelse(tp, agbp);
2233 out_no_agbp:
2234 args->agbp = NULL;
2235 return error;
2236 }
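
/*
 * A minimal sketch of the calling convention (error handling elided);
 * xfs_alloc_vextent() below follows the same pattern:
 *
 *	args->pag = xfs_perag_get(mp, args->agno);
 *	error = xfs_alloc_fix_freelist(args, XFS_ALLOC_FLAG_TRYLOCK);
 *	if (!error && args->agbp)
 *		... the AG is usable; args->agbp holds the locked AGF ...
 *	xfs_perag_put(args->pag);
 *
 * A NULL args->agbp with no error means this AG was skipped (the trylock
 * failed or the space checks rejected it) and the caller should move on
 * to another AG.
 */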
2237
2238 /*
2239 * Get a block from the freelist.
2240  * Returns the address of the retrieved block in *bnop.
2241 */
2242 int /* error */
2243 xfs_alloc_get_freelist(
2244 xfs_trans_t *tp, /* transaction pointer */
2245 xfs_buf_t *agbp, /* buffer containing the agf structure */
2246 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2247 	int		btreeblk) /* destination is an AGF btree */
2248 {
2249 xfs_agf_t *agf; /* a.g. freespace structure */
2250 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2251 xfs_agblock_t bno; /* block number returned */
2252 __be32 *agfl_bno;
2253 int error;
2254 int logflags;
2255 xfs_mount_t *mp = tp->t_mountp;
2256 xfs_perag_t *pag; /* per allocation group data */
2257
2258 /*
2259 * Freelist is empty, give up.
2260 */
2261 agf = XFS_BUF_TO_AGF(agbp);
2262 if (!agf->agf_flcount) {
2263 *bnop = NULLAGBLOCK;
2264 return 0;
2265 }
2266 /*
2267 * Read the array of free blocks.
2268 */
2269 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2270 &agflbp);
2271 if (error)
2272 return error;
2273 
2275 /*
2276 * Get the block number and update the data structures.
2277 */
2278 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2279 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2280 be32_add_cpu(&agf->agf_flfirst, 1);
2281 xfs_trans_brelse(tp, agflbp);
2282 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2283 agf->agf_flfirst = 0;
2284
2285 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2286 be32_add_cpu(&agf->agf_flcount, -1);
2287 xfs_trans_agflist_delta(tp, -1);
2288 pag->pagf_flcount--;
2289 
2290 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2291 	if (btreeblk) {
2292 		be32_add_cpu(&agf->agf_btreeblks, 1);
2293 		pag->pagf_btreeblks++;
2294 		logflags |= XFS_AGF_BTREEBLKS;
2295 	}
2296 	xfs_perag_put(pag);
2297
2298 xfs_alloc_log_agf(tp, agbp, logflags);
2299 *bnop = bno;
2300
2301 return 0;
2302 }
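
/*
 * The free list behaves as a circular array: agf_flfirst indexes the next
 * block to hand out, agf_fllast the most recently added one, and
 * agf_flcount says how many entries are live.  For example, if
 * agf_flfirst sits in the final slot when a block is taken, the increment
 * above runs it off the end and it wraps back to slot 0.
 */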
2303
2304 /*
2305 * Log the given fields from the agf structure.
2306 */
2307 void
2308 xfs_alloc_log_agf(
2309 xfs_trans_t *tp, /* transaction pointer */
2310 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2311 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2312 {
2313 int first; /* first byte offset */
2314 int last; /* last byte offset */
2315 static const short offsets[] = {
2316 offsetof(xfs_agf_t, agf_magicnum),
2317 offsetof(xfs_agf_t, agf_versionnum),
2318 offsetof(xfs_agf_t, agf_seqno),
2319 offsetof(xfs_agf_t, agf_length),
2320 offsetof(xfs_agf_t, agf_roots[0]),
2321 offsetof(xfs_agf_t, agf_levels[0]),
2322 offsetof(xfs_agf_t, agf_flfirst),
2323 offsetof(xfs_agf_t, agf_fllast),
2324 offsetof(xfs_agf_t, agf_flcount),
2325 offsetof(xfs_agf_t, agf_freeblks),
2326 offsetof(xfs_agf_t, agf_longest),
2327 offsetof(xfs_agf_t, agf_btreeblks),
2328 offsetof(xfs_agf_t, agf_uuid),
2329 offsetof(xfs_agf_t, agf_rmap_blocks),
2330 offsetof(xfs_agf_t, agf_refcount_blocks),
2331 offsetof(xfs_agf_t, agf_refcount_root),
2332 offsetof(xfs_agf_t, agf_refcount_level),
2333 /* needed so that we don't log the whole rest of the structure: */
2334 offsetof(xfs_agf_t, agf_spare64),
2335 sizeof(xfs_agf_t)
2336 };
2337
2338 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2339
2340 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2341
2342 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2343 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2344 }
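
/*
 * Callers pass a mask of XFS_AGF_* bits matching the offsets table above;
 * for example, xfs_alloc_get_freelist() logs its freelist bookkeeping
 * with at least:
 *
 *	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
 *
 * xfs_btree_offsets() turns the lowest and highest set bits into the byte
 * range [first, last] of the on-disk AGF that has to be logged.
 */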
2345
2346 /*
2347 * Interface for inode allocation to force the pag data to be initialized.
2348 */
2349 int /* error */
2350 xfs_alloc_pagf_init(
2351 xfs_mount_t *mp, /* file system mount structure */
2352 xfs_trans_t *tp, /* transaction pointer */
2353 xfs_agnumber_t agno, /* allocation group number */
2354 int flags) /* XFS_ALLOC_FLAGS_... */
2355 {
2356 xfs_buf_t *bp;
2357 int error;
2358
2359 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2360 return error;
2361 if (bp)
2362 xfs_trans_brelse(tp, bp);
2363 return 0;
2364 }
2365
2366 /*
2367 * Put the block on the freelist for the allocation group.
2368 */
2369 int /* error */
2370 xfs_alloc_put_freelist(
2371 xfs_trans_t *tp, /* transaction pointer */
2372 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2373 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2374 xfs_agblock_t bno, /* block being freed */
2375 	int		btreeblk) /* block came from an AGF btree */
2376 {
2377 xfs_agf_t *agf; /* a.g. freespace structure */
2378 __be32 *blockp;/* pointer to array entry */
2379 int error;
2380 int logflags;
2381 xfs_mount_t *mp; /* mount structure */
2382 xfs_perag_t *pag; /* per allocation group data */
2383 __be32 *agfl_bno;
2384 int startoff;
2385
2386 agf = XFS_BUF_TO_AGF(agbp);
2387 mp = tp->t_mountp;
2388
2389 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2390 be32_to_cpu(agf->agf_seqno), &agflbp)))
2391 return error;
2392 be32_add_cpu(&agf->agf_fllast, 1);
2393 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2394 agf->agf_fllast = 0;
2395
2396 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2397 be32_add_cpu(&agf->agf_flcount, 1);
2398 xfs_trans_agflist_delta(tp, 1);
2399 pag->pagf_flcount++;
2400
2401 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2402 if (btreeblk) {
2403 be32_add_cpu(&agf->agf_btreeblks, -1);
2404 pag->pagf_btreeblks--;
2405 logflags |= XFS_AGF_BTREEBLKS;
2406 }
2407 xfs_perag_put(pag);
2408
2409 xfs_alloc_log_agf(tp, agbp, logflags);
2410
2411 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2412
2413 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2414 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2415 *blockp = cpu_to_be32(bno);
2416 startoff = (char *)blockp - (char *)agflbp->b_addr;
2417
2418 xfs_alloc_log_agf(tp, agbp, logflags);
2419
2420 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2421 xfs_trans_log_buf(tp, agflbp, startoff,
2422 startoff + sizeof(xfs_agblock_t) - 1);
2423 return 0;
2424 }
2425
2426 static xfs_failaddr_t
2427 xfs_agf_verify(
2428 struct xfs_buf *bp)
2429 {
2430 struct xfs_mount *mp = bp->b_target->bt_mount;
2431 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
2432
2433 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2434 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2435 return __this_address;
2436 if (!xfs_log_check_lsn(mp,
2437 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2438 return __this_address;
2439 }
2440
2441 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2442 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2443 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2444 be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2445 be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2446 be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2447 return __this_address;
2448
2449 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2450 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2451 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2452 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2453 return __this_address;
2454
2455 if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2456 (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2457 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2458 return __this_address;
2459
2460 /*
2461 * during growfs operations, the perag is not fully initialised,
2462 * so we can't use it for any useful checking. growfs ensures we can't
2463 * use it by using uncached buffers that don't have the perag attached
2464 * so we can detect and avoid this problem.
2465 */
2466 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2467 return __this_address;
2468
2469 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2470 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2471 return __this_address;
2472
2473 if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2474 (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2475 be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2476 return __this_address;
2477
2478 return NULL;
2480 }
2481
2482 static void
2483 xfs_agf_read_verify(
2484 struct xfs_buf *bp)
2485 {
2486 struct xfs_mount *mp = bp->b_target->bt_mount;
2487 xfs_failaddr_t fa;
2488
2489 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2490 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2491 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2492 else {
2493 fa = xfs_agf_verify(bp);
2494 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2495 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2496 }
2497 }
2498
2499 static void
2500 xfs_agf_write_verify(
2501 struct xfs_buf *bp)
2502 {
2503 struct xfs_mount *mp = bp->b_target->bt_mount;
2504 struct xfs_buf_log_item *bip = bp->b_log_item;
2505 xfs_failaddr_t fa;
2506
2507 fa = xfs_agf_verify(bp);
2508 if (fa) {
2509 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2510 return;
2511 }
2512
2513 if (!xfs_sb_version_hascrc(&mp->m_sb))
2514 return;
2515
2516 if (bip)
2517 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2518
2519 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2520 }
2521
2522 const struct xfs_buf_ops xfs_agf_buf_ops = {
2523 .name = "xfs_agf",
2524 .verify_read = xfs_agf_read_verify,
2525 .verify_write = xfs_agf_write_verify,
2526 .verify_struct = xfs_agf_verify,
2527 };
2528
2529 /*
2530 * Read in the allocation group header (free/alloc section).
2531 */
2532 int /* error */
2533 xfs_read_agf(
2534 struct xfs_mount *mp, /* mount point structure */
2535 struct xfs_trans *tp, /* transaction pointer */
2536 xfs_agnumber_t agno, /* allocation group number */
2537 int flags, /* XFS_BUF_ */
2538 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2539 {
2540 int error;
2541
2542 trace_xfs_read_agf(mp, agno);
2543
2544 ASSERT(agno != NULLAGNUMBER);
2545 error = xfs_trans_read_buf(
2546 mp, tp, mp->m_ddev_targp,
2547 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2548 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2549 if (error)
2550 return error;
2551 if (!*bpp)
2552 return 0;
2553
2554 ASSERT(!(*bpp)->b_error);
2555 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2556 return 0;
2557 }
2558
2559 /*
2560 * Read in the allocation group header (free/alloc section).
2561 */
2562 int /* error */
2563 xfs_alloc_read_agf(
2564 struct xfs_mount *mp, /* mount point structure */
2565 struct xfs_trans *tp, /* transaction pointer */
2566 xfs_agnumber_t agno, /* allocation group number */
2567 int flags, /* XFS_ALLOC_FLAG_... */
2568 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2569 {
2570 struct xfs_agf *agf; /* ag freelist header */
2571 struct xfs_perag *pag; /* per allocation group data */
2572 int error;
2573
2574 trace_xfs_alloc_read_agf(mp, agno);
2575
2576 ASSERT(agno != NULLAGNUMBER);
2577 error = xfs_read_agf(mp, tp, agno,
2578 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2579 bpp);
2580 if (error)
2581 return error;
2582 if (!*bpp)
2583 return 0;
2584 ASSERT(!(*bpp)->b_error);
2585
2586 agf = XFS_BUF_TO_AGF(*bpp);
2587 pag = xfs_perag_get(mp, agno);
2588 if (!pag->pagf_init) {
2589 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2590 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2591 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2592 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2593 pag->pagf_levels[XFS_BTNUM_BNOi] =
2594 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2595 pag->pagf_levels[XFS_BTNUM_CNTi] =
2596 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2597 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2598 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2599 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2600 spin_lock_init(&pag->pagb_lock);
2601 pag->pagb_count = 0;
2602 /* XXX: pagb_tree doesn't exist in userspace */
2603 //pag->pagb_tree = RB_ROOT;
2604 pag->pagf_init = 1;
2605 }
2606 #ifdef DEBUG
2607 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2608 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2609 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2610 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2611 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2612 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2613 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2614 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2615 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2616 }
2617 #endif
2618 xfs_perag_put(pag);
2619 return 0;
2620 }
2621
2622 /*
2623 * Allocate an extent (variable-size).
2624 * Depending on the allocation type, we either look in a single allocation
2625 * group or loop over the allocation groups to find the result.
2626 */
2627 int /* error */
2628 xfs_alloc_vextent(
2629 xfs_alloc_arg_t *args) /* allocation argument structure */
2630 {
2631 xfs_agblock_t agsize; /* allocation group size */
2632 int error;
2633 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2634 xfs_mount_t *mp; /* mount structure pointer */
2635 xfs_agnumber_t sagno; /* starting allocation group number */
2636 xfs_alloctype_t type; /* input allocation type */
2637 int bump_rotor = 0;
2638 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2639
2640 mp = args->mp;
2641 type = args->otype = args->type;
2642 args->agbno = NULLAGBLOCK;
2643 /*
2644 * Just fix this up, for the case where the last a.g. is shorter
2645 * (or there's only one a.g.) and the caller couldn't easily figure
2646 * that out (xfs_bmap_alloc).
2647 */
2648 agsize = mp->m_sb.sb_agblocks;
2649 if (args->maxlen > agsize)
2650 args->maxlen = agsize;
2651 if (args->alignment == 0)
2652 args->alignment = 1;
2653 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2654 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2655 ASSERT(args->minlen <= args->maxlen);
2656 ASSERT(args->minlen <= agsize);
2657 ASSERT(args->mod < args->prod);
2658 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2659 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2660 args->minlen > args->maxlen || args->minlen > agsize ||
2661 args->mod >= args->prod) {
2662 args->fsbno = NULLFSBLOCK;
2663 trace_xfs_alloc_vextent_badargs(args);
2664 return 0;
2665 }
2666
2667 switch (type) {
2668 case XFS_ALLOCTYPE_THIS_AG:
2669 case XFS_ALLOCTYPE_NEAR_BNO:
2670 case XFS_ALLOCTYPE_THIS_BNO:
2671 /*
2672 * These three force us into a single a.g.
2673 */
2674 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2675 args->pag = xfs_perag_get(mp, args->agno);
2676 error = xfs_alloc_fix_freelist(args, 0);
2677 if (error) {
2678 trace_xfs_alloc_vextent_nofix(args);
2679 goto error0;
2680 }
2681 if (!args->agbp) {
2682 trace_xfs_alloc_vextent_noagbp(args);
2683 break;
2684 }
2685 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2686 if ((error = xfs_alloc_ag_vextent(args)))
2687 goto error0;
2688 break;
2689 case XFS_ALLOCTYPE_START_BNO:
2690 /*
2691 * Try near allocation first, then anywhere-in-ag after
2692 * the first a.g. fails.
2693 */
2694 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2695 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2696 args->fsbno = XFS_AGB_TO_FSB(mp,
2697 ((mp->m_agfrotor / rotorstep) %
2698 mp->m_sb.sb_agcount), 0);
2699 bump_rotor = 1;
2700 }
2701 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2702 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2703 /* FALLTHROUGH */
2704 case XFS_ALLOCTYPE_FIRST_AG:
2705 /*
2706 * Rotate through the allocation groups looking for a winner.
2707 */
2708 if (type == XFS_ALLOCTYPE_FIRST_AG) {
2709 /*
2710 * Start with allocation group given by bno.
2711 */
2712 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2713 args->type = XFS_ALLOCTYPE_THIS_AG;
2714 sagno = 0;
2715 flags = 0;
2716 } else {
2717 /*
2718 * Start with the given allocation group.
2719 */
2720 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2721 flags = XFS_ALLOC_FLAG_TRYLOCK;
2722 }
2723 /*
2724 * Loop over allocation groups twice; first time with
2725 * trylock set, second time without.
2726 */
2727 for (;;) {
2728 args->pag = xfs_perag_get(mp, args->agno);
2729 error = xfs_alloc_fix_freelist(args, flags);
2730 if (error) {
2731 trace_xfs_alloc_vextent_nofix(args);
2732 goto error0;
2733 }
2734 /*
2735 * If we get a buffer back then the allocation will fly.
2736 */
2737 if (args->agbp) {
2738 if ((error = xfs_alloc_ag_vextent(args)))
2739 goto error0;
2740 break;
2741 }
2742
2743 trace_xfs_alloc_vextent_loopfailed(args);
2744
2745 /*
2746 * Didn't work, figure out the next iteration.
2747 */
2748 if (args->agno == sagno &&
2749 type == XFS_ALLOCTYPE_START_BNO)
2750 args->type = XFS_ALLOCTYPE_THIS_AG;
2751 /*
2752 * For the first allocation, we can try any AG to get
2753 * space. However, if we already have allocated a
2754 * block, we don't want to try AGs whose number is below
2755 * sagno. Otherwise, we may end up with out-of-order
2756 * locking of AGF, which might cause deadlock.
2757 */
2758 if (++(args->agno) == mp->m_sb.sb_agcount) {
2759 if (args->firstblock != NULLFSBLOCK)
2760 args->agno = sagno;
2761 else
2762 args->agno = 0;
2763 }
2764 /*
2765 * Reached the starting a.g., must either be done
2766 * or switch to non-trylock mode.
2767 */
2768 if (args->agno == sagno) {
2769 if (flags == 0) {
2770 args->agbno = NULLAGBLOCK;
2771 trace_xfs_alloc_vextent_allfailed(args);
2772 break;
2773 }
2774
2775 flags = 0;
2776 if (type == XFS_ALLOCTYPE_START_BNO) {
2777 args->agbno = XFS_FSB_TO_AGBNO(mp,
2778 args->fsbno);
2779 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2780 }
2781 }
2782 xfs_perag_put(args->pag);
2783 }
2784 if (bump_rotor) {
2785 if (args->agno == sagno)
2786 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2787 (mp->m_sb.sb_agcount * rotorstep);
2788 else
2789 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2790 (mp->m_sb.sb_agcount * rotorstep);
2791 }
2792 break;
2793 default:
2794 ASSERT(0);
2795 /* NOTREACHED */
2796 }
2797 if (args->agbno == NULLAGBLOCK)
2798 args->fsbno = NULLFSBLOCK;
2799 else {
2800 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2801 #ifdef DEBUG
2802 ASSERT(args->len >= args->minlen);
2803 ASSERT(args->len <= args->maxlen);
2804 ASSERT(args->agbno % args->alignment == 0);
2805 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2806 args->len);
2807 #endif
2808
2809 /* Zero the extent if we were asked to do so */
2810 if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2811 error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2812 if (error)
2813 goto error0;
2814 }
2816 }
2817 xfs_perag_put(args->pag);
2818 return 0;
2819 error0:
2820 xfs_perag_put(args->pag);
2821 return error;
2822 }
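
/*
 * A minimal caller sketch (hypothetical values, error handling elided)
 * asking for 1-16 blocks near a target block within a transaction; oinfo
 * is assumed to have been filled by one of the xfs_rmap_*_owner()
 * helpers:
 *
 *	struct xfs_alloc_arg	args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.fsbno = target_fsbno;
 *	args.type = XFS_ALLOCTYPE_START_BNO;
 *	args.minlen = 1;
 *	args.maxlen = 16;
 *	args.prod = 1;
 *	args.resv = XFS_AG_RESV_NONE;
 *	args.oinfo = oinfo;
 *	error = xfs_alloc_vextent(&args);
 *
 * On return, args.fsbno is the start of the allocated extent and args.len
 * its length, or NULLFSBLOCK if nothing could be allocated.
 */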
2823
2824 /* Ensure that the freelist is at full capacity. */
2825 int
2826 xfs_free_extent_fix_freelist(
2827 struct xfs_trans *tp,
2828 xfs_agnumber_t agno,
2829 struct xfs_buf **agbp)
2830 {
2831 struct xfs_alloc_arg args;
2832 int error;
2833
2834 memset(&args, 0, sizeof(struct xfs_alloc_arg));
2835 args.tp = tp;
2836 args.mp = tp->t_mountp;
2837 args.agno = agno;
2838
2839 /*
2840 	 * Validate that the block number is legal - this enables us to detect
2841 	 * and handle silent filesystem corruption rather than crashing.
2842 */
2843 if (args.agno >= args.mp->m_sb.sb_agcount)
2844 return -EFSCORRUPTED;
2845
2846 args.pag = xfs_perag_get(args.mp, args.agno);
2847 ASSERT(args.pag);
2848
2849 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2850 if (error)
2851 goto out;
2852
2853 *agbp = args.agbp;
2854 out:
2855 xfs_perag_put(args.pag);
2856 return error;
2857 }
2858
2859 /*
2860 * Free an extent.
2861 * Just break up the extent address and hand off to xfs_free_ag_extent
2862 * after fixing up the freelist.
2863 */
2864 int /* error */
2865 xfs_free_extent(
2866 struct xfs_trans *tp, /* transaction pointer */
2867 xfs_fsblock_t bno, /* starting block number of extent */
2868 xfs_extlen_t len, /* length of extent */
2869 struct xfs_owner_info *oinfo, /* extent owner */
2870 enum xfs_ag_resv_type type) /* block reservation type */
2871 {
2872 struct xfs_mount *mp = tp->t_mountp;
2873 struct xfs_buf *agbp;
2874 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
2875 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
2876 int error;
2877
2878 ASSERT(len != 0);
2879 ASSERT(type != XFS_AG_RESV_AGFL);
2880
2881 if (XFS_TEST_ERROR(false, mp,
2882 XFS_ERRTAG_FREE_EXTENT))
2883 return -EIO;
2884
2885 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
2886 if (error)
2887 return error;
2888
2889 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
2890
2891 /* validate the extent size is legal now we have the agf locked */
2892 XFS_WANT_CORRUPTED_GOTO(mp,
2893 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
2894 err);
2895
2896 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
2897 if (error)
2898 goto err;
2899
2900 xfs_extent_busy_insert(tp, agno, agbno, len, 0);
2901 return 0;
2902
2903 err:
2904 xfs_trans_brelse(tp, agbp);
2905 return error;
2906 }
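
/*
 * A minimal freeing sketch: oinfo describes the extent's rmap owner (as
 * filled by one of the xfs_rmap_*_owner() helpers), and no per-AG
 * reservation is involved:
 *
 *	error = xfs_free_extent(tp, fsbno, len, &oinfo, XFS_AG_RESV_NONE);
 *
 * The busy-extent insertion above keeps the blocks from being reallocated
 * and rewritten before the free commits to the log.
 */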
2907
2908 struct xfs_alloc_query_range_info {
2909 xfs_alloc_query_range_fn fn;
2910 void *priv;
2911 };
2912
2913 /* Format btree record and pass to our callback. */
2914 STATIC int
2915 xfs_alloc_query_range_helper(
2916 struct xfs_btree_cur *cur,
2917 union xfs_btree_rec *rec,
2918 void *priv)
2919 {
2920 struct xfs_alloc_query_range_info *query = priv;
2921 struct xfs_alloc_rec_incore irec;
2922
2923 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
2924 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
2925 return query->fn(cur, &irec, query->priv);
2926 }
2927
2928 /* Find all free space within a given range of blocks. */
2929 int
2930 xfs_alloc_query_range(
2931 struct xfs_btree_cur *cur,
2932 struct xfs_alloc_rec_incore *low_rec,
2933 struct xfs_alloc_rec_incore *high_rec,
2934 xfs_alloc_query_range_fn fn,
2935 void *priv)
2936 {
2937 union xfs_btree_irec low_brec;
2938 union xfs_btree_irec high_brec;
2939 struct xfs_alloc_query_range_info query;
2940
2941 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
2942 low_brec.a = *low_rec;
2943 high_brec.a = *high_rec;
2944 query.priv = priv;
2945 query.fn = fn;
2946 return xfs_btree_query_range(cur, &low_brec, &high_brec,
2947 xfs_alloc_query_range_helper, &query);
2948 }
2949
2950 /* Find all free space records. */
2951 int
2952 xfs_alloc_query_all(
2953 struct xfs_btree_cur *cur,
2954 xfs_alloc_query_range_fn fn,
2955 void *priv)
2956 {
2957 struct xfs_alloc_query_range_info query;
2958
2959 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
2960 query.priv = priv;
2961 query.fn = fn;
2962 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
2963 }
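
/*
 * A sketch of a query callback (hypothetical helper) that counts free
 * extents; the same shape works for xfs_alloc_query_range():
 *
 *	STATIC int
 *	xfs_count_free_helper(
 *		struct xfs_btree_cur		*cur,
 *		struct xfs_alloc_rec_incore	*rec,
 *		void				*priv)
 *	{
 *		unsigned int	*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	error = xfs_alloc_query_all(cur, xfs_count_free_helper, &count);
 *
 * A nonzero return from the callback ends the walk early and is passed
 * back to the caller.
 */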
2964
2965 /* Find the size of the AG, in blocks. */
2966 xfs_agblock_t
2967 xfs_ag_block_count(
2968 struct xfs_mount *mp,
2969 xfs_agnumber_t agno)
2970 {
2971 ASSERT(agno < mp->m_sb.sb_agcount);
2972
2973 if (agno < mp->m_sb.sb_agcount - 1)
2974 return mp->m_sb.sb_agblocks;
2975 return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
2976 }
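
/*
 * Only the last AG can be short.  For example, with sb_agblocks = 1000
 * and sb_dblocks = 3500, AGs 0 through 2 report 1000 blocks each while
 * AG 3 reports 3500 - 3 * 1000 = 500.
 */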
2977
2978 /*
2979 * Verify that an AG block number pointer neither points outside the AG
2980 * nor points at static metadata.
2981 */
2982 bool
2983 xfs_verify_agbno(
2984 struct xfs_mount *mp,
2985 xfs_agnumber_t agno,
2986 xfs_agblock_t agbno)
2987 {
2988 xfs_agblock_t eoag;
2989
2990 eoag = xfs_ag_block_count(mp, agno);
2991 if (agbno >= eoag)
2992 return false;
2993 if (agbno <= XFS_AGFL_BLOCK(mp))
2994 return false;
2995 return true;
2996 }
2997
2998 /*
2999 * Verify that an FS block number pointer neither points outside the
3000 * filesystem nor points at static AG metadata.
3001 */
3002 bool
3003 xfs_verify_fsbno(
3004 struct xfs_mount *mp,
3005 xfs_fsblock_t fsbno)
3006 {
3007 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
3008
3009 if (agno >= mp->m_sb.sb_agcount)
3010 return false;
3011 return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
3012 }
3013
3014 /* Is there a record covering a given extent? */
3015 int
3016 xfs_alloc_has_record(
3017 struct xfs_btree_cur *cur,
3018 xfs_agblock_t bno,
3019 xfs_extlen_t len,
3020 bool *exists)
3021 {
3022 union xfs_btree_irec low;
3023 union xfs_btree_irec high;
3024
3025 memset(&low, 0, sizeof(low));
3026 low.a.ar_startblock = bno;
3027 memset(&high, 0xFF, sizeof(high));
3028 high.a.ar_startblock = bno + len - 1;
3029
3030 return xfs_btree_has_record(cur, &low, &high, exists);
3031 }
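
/*
 * The low key is zeroed and the high key saturated with 0xFF bytes so
 * that only ar_startblock constrains the walk: any record whose start
 * falls within [bno, bno + len - 1] reports *exists.  For example, with
 * bno = 100 and len = 8 the window is [100, 107], so a free extent
 * starting at block 104 matches even if it extends well past block 107.
 */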