]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - libxfs/xfs_alloc.c
white space cleanup
[thirdparty/xfsprogs-dev.git] / libxfs / xfs_alloc.c
1 /*
2 * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33 #include <xfs.h>
34
35 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
36 #define XFSA_FIXUP_BNO_OK 1
37 #define XFSA_FIXUP_CNT_OK 2
38
39 /*
40 * Compute aligned version of the found extent.
41 * Takes alignment and min length into account.
42 */
43 STATIC int /* success (>= minlen) */
44 xfs_alloc_compute_aligned(
45 xfs_agblock_t foundbno, /* starting block in found extent */
46 xfs_extlen_t foundlen, /* length in found extent */
47 xfs_extlen_t alignment, /* alignment for allocation */
48 xfs_extlen_t minlen, /* minimum length for allocation */
49 xfs_agblock_t *resbno, /* result block number */
50 xfs_extlen_t *reslen) /* result length */
51 {
52 xfs_agblock_t bno;
53 xfs_extlen_t diff;
54 xfs_extlen_t len;
55
56 if (alignment > 1 && foundlen >= minlen) {
57 bno = roundup(foundbno, alignment);
58 diff = bno - foundbno;
59 len = diff >= foundlen ? 0 : foundlen - diff;
60 } else {
61 bno = foundbno;
62 len = foundlen;
63 }
64 *resbno = bno;
65 *reslen = len;
66 return len >= minlen;
67 }
68
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 * Returns the absolute distance of the chosen start from wantbno
 * (0 also when no usable position exists; *newbnop is NULLAGBLOCK then).
 */
STATIC xfs_extlen_t		/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		/*
		 * Free extent starts at or beyond the target start: the best
		 * we can do is its (aligned-up) start, unless rounding pushes
		 * us past the end of the extent entirely.
		 */
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		/*
		 * Free extent straddles the target and alignment matters:
		 * consider the aligned block at/above wantbno (newbno1) and
		 * the one a full alignment unit below it (newbno2), and keep
		 * whichever yields the longer usable length, breaking ties
		 * by distance from wantbno.
		 */
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		/*
		 * Free extent covers the whole target and no alignment is
		 * needed: take the target start exactly (diff 0).
		 */
		newbno1 = wantbno;
	} else if (alignment > 1) {
		/*
		 * Free extent ends before the target would: take the last
		 * aligned position that still fits wantlen, backing up one
		 * alignment unit if rounding up overshot the fit point.
		 */
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		/* No alignment: grab the tail end of the free extent. */
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
128
129 /*
130 * Fix up the length, based on mod and prod.
131 * len should be k * prod + mod for some k.
132 * If len is too small it is returned unchanged.
133 * If len hits maxlen it is left alone.
134 */
135 STATIC void
136 xfs_alloc_fix_len(
137 xfs_alloc_arg_t *args) /* allocation argument structure */
138 {
139 xfs_extlen_t k;
140 xfs_extlen_t rlen;
141
142 ASSERT(args->mod < args->prod);
143 rlen = args->len;
144 ASSERT(rlen >= args->minlen);
145 ASSERT(rlen <= args->maxlen);
146 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
147 (args->mod == 0 && rlen < args->prod))
148 return;
149 k = rlen % args->prod;
150 if (k == args->mod)
151 return;
152 if (k > args->mod) {
153 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
154 return;
155 } else {
156 if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
157 (int)args->minlen)
158 return;
159 }
160 ASSERT(rlen >= args->minlen);
161 ASSERT(rlen <= args->maxlen);
162 args->len = rlen;
163 }
164
165 /*
166 * Fix up length if there is too little space left in the a.g.
167 * Return 1 if ok, 0 if too little, should give up.
168 */
169 STATIC int
170 xfs_alloc_fix_minleft(
171 xfs_alloc_arg_t *args) /* allocation argument structure */
172 {
173 xfs_agf_t *agf; /* a.g. freelist header */
174 int diff; /* free space difference */
175
176 if (args->minleft == 0)
177 return 1;
178 agf = XFS_BUF_TO_AGF(args->agbp);
179 diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT)
180 + INT_GET(agf->agf_flcount, ARCH_CONVERT)
181 - args->len - args->minleft;
182 if (diff >= 0)
183 return 1;
184 args->len += diff; /* shrink the allocated space */
185 if (args->len >= args->minlen)
186 return 1;
187 args->agbno = NULLAGBLOCK;
188 return 0;
189 }
190
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 * Returns 0 on success, an error code otherwise; corruption checks go
 * through XFS_WANT_CORRUPTED_RETURN.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 * If the caller says the cursor is already positioned
	 * (XFSA_FIXUP_CNT_OK), only verify that in DEBUG builds.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 * Same positioned-vs-lookup split as above.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
#ifdef DEBUG
	/*
	 * Sanity check: when both cursors sit in single-level (leaf-root)
	 * trees, the two trees must hold the same number of records.
	 */
	{
		xfs_alloc_block_t	*bnoblock;
		xfs_alloc_block_t	*cntblock;

		if (bno_cur->bc_nlevels == 1 &&
		    cnt_cur->bc_nlevels == 1) {
			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
			XFS_WANT_CORRUPTED_RETURN(
				INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT));
		}
	}
#endif
	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		/* Allocation in the middle: freespace remains on both sides. */
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_alloc_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 * The lookup must fail (i == 0) — the record must not already exist.
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_alloc_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
335
336 /*
337 * Read in the allocation group free block array.
338 */
339 STATIC int /* error */
340 xfs_alloc_read_agfl(
341 xfs_mount_t *mp, /* mount point structure */
342 xfs_trans_t *tp, /* transaction pointer */
343 xfs_agnumber_t agno, /* allocation group number */
344 xfs_buf_t **bpp) /* buffer for the ag free block array */
345 {
346 xfs_buf_t *bp; /* return value */
347 xfs_daddr_t d; /* disk block address */
348 int error;
349
350 ASSERT(agno != NULLAGNUMBER);
351 d = XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR);
352 if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, 1, 0, &bp)))
353 return error;
354 ASSERT(bp);
355 ASSERT(!XFS_BUF_GETERROR(bp));
356 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
357 *bpp = bp;
358 return 0;
359 }
360
361 #if defined(XFS_ALLOC_TRACE)
/*
 * Add an allocation trace entry for an alloc call.
 * Packs the interesting fields of *args into the sixteen ktrace slots;
 * the last two slots encode (type,otype) and the four boolean flags.
 */
STATIC void
xfs_alloc_trace_alloc(
	char		*name,	/* function tag string */
	char		*str,	/* additional string */
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		line)	/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		/* event id in low 16 bits, source line in the high bits */
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)args->mp,
		(void *)(__psunsigned_t)args->agno,
		(void *)(__psunsigned_t)args->agbno,
		(void *)(__psunsigned_t)args->minlen,
		(void *)(__psunsigned_t)args->maxlen,
		(void *)(__psunsigned_t)args->mod,
		(void *)(__psunsigned_t)args->prod,
		(void *)(__psunsigned_t)args->minleft,
		(void *)(__psunsigned_t)args->total,
		(void *)(__psunsigned_t)args->alignment,
		(void *)(__psunsigned_t)args->len,
		/* type in the high half, otype in the low half */
		(void *)((((__psint_t)args->type) << 16) |
			 (__psint_t)args->otype),
		/* boolean flags packed into bits 3..0 */
		(void *)(__psint_t)((args->wasdel << 3) |
				    (args->wasfromfl << 2) |
				    (args->isfl << 1) |
				    (args->userdata << 0)));
}
394
/*
 * Add an allocation trace entry for a free call.
 * Only eight of the sixteen ktrace slots carry data; the rest are NULL.
 */
STATIC void
xfs_alloc_trace_free(
	char		*name,	/* function tag string */
	char		*str,	/* additional string */
	xfs_mount_t	*mp,	/* file system mount point */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	agbno,	/* a.g. relative block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl,	/* set if is freelist allocation/free */
	int		line)	/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		/* event id in low 16 bits, source line in the high bits */
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psunsigned_t)agno,
		(void *)(__psunsigned_t)agbno,
		(void *)(__psunsigned_t)len,
		(void *)(__psint_t)isfl,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
420
421 /*
422 * Add an allocation trace entry for modifying an agf.
423 */
424 STATIC void
425 xfs_alloc_trace_modagf(
426 char *name, /* function tag string */
427 char *str, /* additional string */
428 xfs_mount_t *mp, /* file system mount point */
429 xfs_agf_t *agf, /* new agf value */
430 int flags, /* logging flags for agf */
431 int line) /* source line number */
432 {
433 ktrace_enter(xfs_alloc_trace_buf,
434 (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
435 (void *)name,
436 (void *)str,
437 (void *)mp,
438 (void *)(__psint_t)flags,
439 (void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
440 (void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
441 (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
442 ARCH_CONVERT);
443 (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
444 ARCH_CONVERT);
445 (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
446 ARCH_CONVERT);
447 (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
448 ARCH_CONVERT);
449 (void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
450 (void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
451 (void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
452 (void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT),
453 (void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT));
454 }
455 #endif /* XFS_ALLOC_TRACE */
456
457 /*
458 * Allocation group level functions.
459 */
460
/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent";
#endif

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (error)
		return error;
	/*
	 * If the allocation worked, need to change the agf structure
	 * (and log it), and the superblock.
	 */
	if (args->agbno != NULLAGBLOCK) {
		xfs_agf_t	*agf;	/* allocation group freelist header */
#ifdef XFS_ALLOC_TRACE
		xfs_mount_t	*mp = args->mp;
#endif
		long		slen = (long)args->len;

		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
		ASSERT(!(args->wasfromfl) || !args->isfl);
		ASSERT(args->agbno % args->alignment == 0);
		if (!(args->wasfromfl)) {
			/*
			 * Blocks came out of a freespace btree (not the
			 * freelist): account them against the AGF free-block
			 * count and log the AGF change.
			 */
			agf = XFS_BUF_TO_AGF(args->agbp);
			INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT));
			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
			/* search the busylist for these blocks */
			xfs_alloc_search_busy(args->tp, args->agno,
					args->agbno, args->len);
		}
		/*
		 * Unless this is a freelist fixup, charge the blocks to the
		 * superblock counter (reserved counter for delayed allocs).
		 */
		if (!args->isfl)
			xfs_trans_mod_sb(args->tp,
				args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
					XFS_TRANS_SB_FDBLOCKS, -slen);
		XFS_STATS_INC(xfsstats.xs_allocx);
		XFS_STATS_ADD(xfsstats.xs_allocb, args->len);
	}
	return 0;
}
542
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent_exact";
#endif
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	/* Exact-bno allocations never use alignment. */
	ASSERT(args->alignment == 1);
	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO, 0, 0);
	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find it, return null.
		 */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Grab the freespace record.
	 */
	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;
	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	/*
	 * Fix the length according to mod and prod if given.
	 */
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	/*
	 * If the AG can't keep minleft blocks in reserve, give up
	 * (args->agbno was set to NULLAGBLOCK by fix_minleft).
	 */
	if (!xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;
	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, 0, 0);
	ASSERT(args->agbno + args->len <=
		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT));
	/*
	 * Remove [agbno, len] from both freespace btrees; the by-block
	 * cursor is already positioned (XFSA_FIXUP_BNO_OK).
	 */
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	TRACE_ALLOC("normal", args);
	args->wasfromfl = 0;
	return 0;

error0:
	/* cnt_cur, if it existed, was already torn down above. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	TRACE_ALLOC("error", args);
	return error;
}
649
650 /*
651 * Allocate a variable extent near bno in the allocation group agno.
652 * Extent's length (returned in len) will be between minlen and maxlen,
653 * and of the form k * prod + mod unless there's nothing that large.
654 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
655 */
656 STATIC int /* error */
657 xfs_alloc_ag_vextent_near(
658 xfs_alloc_arg_t *args) /* allocation argument structure */
659 {
660 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
661 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
662 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
663 #ifdef XFS_ALLOC_TRACE
664 static char fname[] = "xfs_alloc_ag_vextent_near";
665 #endif
666 xfs_agblock_t gtbno; /* start bno of right side entry */
667 xfs_agblock_t gtbnoa; /* aligned ... */
668 xfs_extlen_t gtdiff; /* difference to right side entry */
669 xfs_extlen_t gtlen; /* length of right side entry */
670 xfs_extlen_t gtlena; /* aligned ... */
671 xfs_agblock_t gtnew; /* useful start bno of right side */
672 int error; /* error code */
673 int i; /* result code, temporary */
674 int j; /* result code, temporary */
675 xfs_agblock_t ltbno; /* start bno of left side entry */
676 xfs_agblock_t ltbnoa; /* aligned ... */
677 xfs_extlen_t ltdiff; /* difference to left side entry */
678 /*REFERENCED*/
679 xfs_agblock_t ltend; /* end bno of left side entry */
680 xfs_extlen_t ltlen; /* length of left side entry */
681 xfs_extlen_t ltlena; /* aligned ... */
682 xfs_agblock_t ltnew; /* useful start bno of left side */
683 xfs_extlen_t rlen; /* length of returned extent */
684 #if defined(DEBUG) && defined(__KERNEL__)
685 /*
686 * Randomly don't execute the first algorithm.
687 */
688 static int seed; /* randomizing seed value */
689 int dofirst; /* set to do first algorithm */
690 timespec_t now; /* current time */
691
692 if (!seed) {
693 nanotime(&now);
694 seed = (int)now.tv_sec ^ (int)now.tv_nsec;
695 }
696 dofirst = random() & 1;
697 #endif
698 /*
699 * Get a cursor for the by-size btree.
700 */
701 cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
702 args->agno, XFS_BTNUM_CNT, 0, 0);
703 ltlen = 0;
704 bno_cur_lt = bno_cur_gt = NULL;
705 /*
706 * See if there are any free extents as big as maxlen.
707 */
708 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
709 goto error0;
710 /*
711 * If none, then pick up the last entry in the tree unless the
712 * tree is empty.
713 */
714 if (!i) {
715 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
716 &ltlen, &i)))
717 goto error0;
718 if (i == 0 || ltlen == 0) {
719 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
720 return 0;
721 }
722 ASSERT(i == 1);
723 }
724 args->wasfromfl = 0;
725 /*
726 * First algorithm.
727 * If the requested extent is large wrt the freespaces available
728 * in this a.g., then the cursor will be pointing to a btree entry
729 * near the right edge of the tree. If it's in the last btree leaf
730 * block, then we just examine all the entries in that block
731 * that are big enough, and pick the best one.
732 * This is written as a while loop so we can break out of it,
733 * but we never loop back to the top.
734 */
735 while (xfs_btree_islastblock(cnt_cur, 0)) {
736 xfs_extlen_t bdiff;
737 int besti=0;
738 xfs_extlen_t blen=0;
739 xfs_agblock_t bnew=0;
740
741 #if defined(DEBUG) && defined(__KERNEL__)
742 if (!dofirst)
743 break;
744 #endif
745 /*
746 * Start from the entry that lookup found, sequence through
747 * all larger free blocks. If we're actually pointing at a
748 * record smaller than maxlen, go to the start of this block,
749 * and skip all those smaller than minlen.
750 */
751 if (ltlen || args->alignment > 1) {
752 cnt_cur->bc_ptrs[0] = 1;
753 do {
754 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
755 &ltlen, &i)))
756 goto error0;
757 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
758 if (ltlen >= args->minlen)
759 break;
760 if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
761 goto error0;
762 } while (i);
763 ASSERT(ltlen >= args->minlen);
764 if (!i)
765 break;
766 }
767 i = cnt_cur->bc_ptrs[0];
768 for (j = 1, blen = 0, bdiff = 0;
769 !error && j && (blen < args->maxlen || bdiff > 0);
770 error = xfs_alloc_increment(cnt_cur, 0, &j)) {
771 /*
772 * For each entry, decide if it's better than
773 * the previous best entry.
774 */
775 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
776 goto error0;
777 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
778 if (!xfs_alloc_compute_aligned(ltbno, ltlen,
779 args->alignment, args->minlen,
780 &ltbnoa, &ltlena))
781 continue;
782 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
783 xfs_alloc_fix_len(args);
784 ASSERT(args->len >= args->minlen);
785 if (args->len < blen)
786 continue;
787 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
788 args->alignment, ltbno, ltlen, &ltnew);
789 if (ltnew != NULLAGBLOCK &&
790 (args->len > blen || ltdiff < bdiff)) {
791 bdiff = ltdiff;
792 bnew = ltnew;
793 blen = args->len;
794 besti = cnt_cur->bc_ptrs[0];
795 }
796 }
797 /*
798 * It didn't work. We COULD be in a case where
799 * there's a good record somewhere, so try again.
800 */
801 if (blen == 0)
802 break;
803 /*
804 * Point at the best entry, and retrieve it again.
805 */
806 cnt_cur->bc_ptrs[0] = besti;
807 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
808 goto error0;
809 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
810 ltend = ltbno + ltlen;
811 ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
812 ARCH_CONVERT));
813 args->len = blen;
814 if (!xfs_alloc_fix_minleft(args)) {
815 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
816 TRACE_ALLOC("nominleft", args);
817 return 0;
818 }
819 blen = args->len;
820 /*
821 * We are allocating starting at bnew for blen blocks.
822 */
823 args->agbno = bnew;
824 ASSERT(bnew >= ltbno);
825 ASSERT(bnew + blen <= ltend);
826 /*
827 * Set up a cursor for the by-bno tree.
828 */
829 bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
830 args->agbp, args->agno, XFS_BTNUM_BNO, 0, 0);
831 /*
832 * Fix up the btree entries.
833 */
834 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
835 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
836 goto error0;
837 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
838 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
839 TRACE_ALLOC("first", args);
840 return 0;
841 }
842 /*
843 * Second algorithm.
844 * Search in the by-bno tree to the left and to the right
845 * simultaneously, until in each case we find a space big enough,
846 * or run into the edge of the tree. When we run into the edge,
847 * we deallocate that cursor.
848 * If both searches succeed, we compare the two spaces and pick
849 * the better one.
850 * With alignment, it's possible for both to fail; the upper
851 * level algorithm that picks allocation groups for allocations
852 * is not supposed to do this.
853 */
854 /*
855 * Allocate and initialize the cursor for the leftward search.
856 */
857 bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
858 args->agno, XFS_BTNUM_BNO, 0, 0);
859 /*
860 * Lookup <= bno to find the leftward search's starting point.
861 */
862 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
863 goto error0;
864 if (!i) {
865 /*
866 * Didn't find anything; use this cursor for the rightward
867 * search.
868 */
869 bno_cur_gt = bno_cur_lt;
870 bno_cur_lt = 0;
871 }
872 /*
873 * Found something. Duplicate the cursor for the rightward search.
874 */
875 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
876 goto error0;
877 /*
878 * Increment the cursor, so we will point at the entry just right
879 * of the leftward entry if any, or to the leftmost entry.
880 */
881 if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
882 goto error0;
883 if (!i) {
884 /*
885 * It failed, there are no rightward entries.
886 */
887 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
888 bno_cur_gt = NULL;
889 }
890 /*
891 * Loop going left with the leftward cursor, right with the
892 * rightward cursor, until either both directions give up or
893 * we find an entry at least as big as minlen.
894 */
895 do {
896 if (bno_cur_lt) {
897 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
898 goto error0;
899 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
900 if (xfs_alloc_compute_aligned(ltbno, ltlen,
901 args->alignment, args->minlen,
902 &ltbnoa, &ltlena))
903 break;
904 if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
905 goto error0;
906 if (!i) {
907 xfs_btree_del_cursor(bno_cur_lt,
908 XFS_BTREE_NOERROR);
909 bno_cur_lt = NULL;
910 }
911 }
912 if (bno_cur_gt) {
913 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
914 goto error0;
915 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
916 if (xfs_alloc_compute_aligned(gtbno, gtlen,
917 args->alignment, args->minlen,
918 &gtbnoa, &gtlena))
919 break;
920 if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
921 goto error0;
922 if (!i) {
923 xfs_btree_del_cursor(bno_cur_gt,
924 XFS_BTREE_NOERROR);
925 bno_cur_gt = NULL;
926 }
927 }
928 } while (bno_cur_lt || bno_cur_gt);
929 /*
930 * Got both cursors still active, need to find better entry.
931 */
932 if (bno_cur_lt && bno_cur_gt) {
933 /*
934 * Left side is long enough, look for a right side entry.
935 */
936 if (ltlena >= args->minlen) {
937 /*
938 * Fix up the length.
939 */
940 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
941 xfs_alloc_fix_len(args);
942 rlen = args->len;
943 ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
944 args->alignment, ltbno, ltlen, &ltnew);
945 /*
946 * Not perfect.
947 */
948 if (ltdiff) {
949 /*
950 * Look until we find a better one, run out of
951 * space, or run off the end.
952 */
953 while (bno_cur_lt && bno_cur_gt) {
954 if ((error = xfs_alloc_get_rec(
955 bno_cur_gt, &gtbno,
956 &gtlen, &i)))
957 goto error0;
958 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
959 xfs_alloc_compute_aligned(gtbno, gtlen,
960 args->alignment, args->minlen,
961 &gtbnoa, &gtlena);
962 /*
963 * The left one is clearly better.
964 */
965 if (gtbnoa >= args->agbno + ltdiff) {
966 xfs_btree_del_cursor(
967 bno_cur_gt,
968 XFS_BTREE_NOERROR);
969 bno_cur_gt = NULL;
970 break;
971 }
972 /*
973 * If we reach a big enough entry,
974 * compare the two and pick the best.
975 */
976 if (gtlena >= args->minlen) {
977 args->len =
978 XFS_EXTLEN_MIN(gtlena,
979 args->maxlen);
980 xfs_alloc_fix_len(args);
981 rlen = args->len;
982 gtdiff = xfs_alloc_compute_diff(
983 args->agbno, rlen,
984 args->alignment,
985 gtbno, gtlen, &gtnew);
986 /*
987 * Right side is better.
988 */
989 if (gtdiff < ltdiff) {
990 xfs_btree_del_cursor(
991 bno_cur_lt,
992 XFS_BTREE_NOERROR);
993 bno_cur_lt = NULL;
994 }
995 /*
996 * Left side is better.
997 */
998 else {
999 xfs_btree_del_cursor(
1000 bno_cur_gt,
1001 XFS_BTREE_NOERROR);
1002 bno_cur_gt = NULL;
1003 }
1004 break;
1005 }
1006 /*
1007 * Fell off the right end.
1008 */
1009 if ((error = xfs_alloc_increment(
1010 bno_cur_gt, 0, &i)))
1011 goto error0;
1012 if (!i) {
1013 xfs_btree_del_cursor(
1014 bno_cur_gt,
1015 XFS_BTREE_NOERROR);
1016 bno_cur_gt = NULL;
1017 break;
1018 }
1019 }
1020 }
1021 /*
1022 * The left side is perfect, trash the right side.
1023 */
1024 else {
1025 xfs_btree_del_cursor(bno_cur_gt,
1026 XFS_BTREE_NOERROR);
1027 bno_cur_gt = NULL;
1028 }
1029 }
1030 /*
1031 * It's the right side that was found first, look left.
1032 */
1033 else {
1034 /*
1035 * Fix up the length.
1036 */
1037 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1038 xfs_alloc_fix_len(args);
1039 rlen = args->len;
1040 gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
1041 args->alignment, gtbno, gtlen, &gtnew);
1042 /*
1043 * Right side entry isn't perfect.
1044 */
1045 if (gtdiff) {
1046 /*
1047 * Look until we find a better one, run out of
1048 * space, or run off the end.
1049 */
1050 while (bno_cur_lt && bno_cur_gt) {
1051 if ((error = xfs_alloc_get_rec(
1052 bno_cur_lt, &ltbno,
1053 &ltlen, &i)))
1054 goto error0;
1055 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1056 xfs_alloc_compute_aligned(ltbno, ltlen,
1057 args->alignment, args->minlen,
1058 &ltbnoa, &ltlena);
1059 /*
1060 * The right one is clearly better.
1061 */
1062 if (ltbnoa <= args->agbno - gtdiff) {
1063 xfs_btree_del_cursor(
1064 bno_cur_lt,
1065 XFS_BTREE_NOERROR);
1066 bno_cur_lt = NULL;
1067 break;
1068 }
1069 /*
1070 * If we reach a big enough entry,
1071 * compare the two and pick the best.
1072 */
1073 if (ltlena >= args->minlen) {
1074 args->len = XFS_EXTLEN_MIN(
1075 ltlena, args->maxlen);
1076 xfs_alloc_fix_len(args);
1077 rlen = args->len;
1078 ltdiff = xfs_alloc_compute_diff(
1079 args->agbno, rlen,
1080 args->alignment,
1081 ltbno, ltlen, &ltnew);
1082 /*
1083 * Left side is better.
1084 */
1085 if (ltdiff < gtdiff) {
1086 xfs_btree_del_cursor(
1087 bno_cur_gt,
1088 XFS_BTREE_NOERROR);
1089 bno_cur_gt = NULL;
1090 }
1091 /*
1092 * Right side is better.
1093 */
1094 else {
1095 xfs_btree_del_cursor(
1096 bno_cur_lt,
1097 XFS_BTREE_NOERROR);
1098 bno_cur_lt = NULL;
1099 }
1100 break;
1101 }
1102 /*
1103 * Fell off the left end.
1104 */
1105 if ((error = xfs_alloc_decrement(
1106 bno_cur_lt, 0, &i)))
1107 goto error0;
1108 if (!i) {
1109 xfs_btree_del_cursor(bno_cur_lt,
1110 XFS_BTREE_NOERROR);
1111 bno_cur_lt = NULL;
1112 break;
1113 }
1114 }
1115 }
1116 /*
1117 * The right side is perfect, trash the left side.
1118 */
1119 else {
1120 xfs_btree_del_cursor(bno_cur_lt,
1121 XFS_BTREE_NOERROR);
1122 bno_cur_lt = NULL;
1123 }
1124 }
1125 }
1126 /*
1127 * If we couldn't get anything, give up.
1128 */
1129 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1130 TRACE_ALLOC("neither", args);
1131 args->agbno = NULLAGBLOCK;
1132 return 0;
1133 }
1134 /*
1135 * At this point we have selected a freespace entry, either to the
1136 * left or to the right. If it's on the right, copy all the
1137 * useful variables to the "left" set so we only have one
1138 * copy of this code.
1139 */
1140 if (bno_cur_gt) {
1141 bno_cur_lt = bno_cur_gt;
1142 bno_cur_gt = NULL;
1143 ltbno = gtbno;
1144 ltbnoa = gtbnoa;
1145 ltlen = gtlen;
1146 ltlena = gtlena;
1147 j = 1;
1148 } else
1149 j = 0;
1150 /*
1151 * Fix up the length and compute the useful address.
1152 */
1153 ltend = ltbno + ltlen;
1154 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1155 xfs_alloc_fix_len(args);
1156 if (!xfs_alloc_fix_minleft(args)) {
1157 TRACE_ALLOC("nominleft", args);
1158 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1159 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1160 return 0;
1161 }
1162 rlen = args->len;
1163 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
1164 ltlen, &ltnew);
1165 ASSERT(ltnew >= ltbno);
1166 ASSERT(ltnew + rlen <= ltend);
1167 ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
1168 ARCH_CONVERT));
1169 args->agbno = ltnew;
1170 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1171 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1172 goto error0;
1173 TRACE_ALLOC(j ? "gt" : "lt", args);
1174 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1175 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1176 return 0;
1177
1178 error0:
1179 TRACE_ALLOC("error", args);
1180 if (cnt_cur != NULL)
1181 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1182 if (bno_cur_lt != NULL)
1183 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1184 if (bno_cur_gt != NULL)
1185 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1186 return error;
1187 }
1188
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent_size";
#endif
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */

	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, 0, 0);
	bno_cur = NULL;
	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 * An extent that long can always yield maxlen blocks starting at
	 * an aligned block number.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
				&flen, &i)))
			goto error0;
		if (i == 0 || flen == 0) {
			/*
			 * i == 0 means the freelist satisfied the request
			 * (args already filled in by _small); flen == 0
			 * means there was nothing at all.  Either way,
			 * nothing more to do here.
			 */
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			TRACE_ALLOC("noentry", args);
			return 0;
		}
		ASSERT(i == 1);
	}
	/*
	 * There's a freespace as big as maxlen+alignment-1, get it.
	 */
	else {
		if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
		&rbno, &rlen);
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;	/* best found start so far */
		xfs_extlen_t	bestflen;	/* best found length so far */
		xfs_agblock_t	bestrbno;	/* best aligned start so far */
		xfs_extlen_t	bestrlen;	/* best aligned length so far */

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		/*
		 * Walk left through the by-size tree (toward smaller
		 * entries) looking for an extent whose aligned length
		 * beats the best seen so far.
		 */
		for (;;) {
			if ((error = xfs_alloc_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			/*
			 * Raw length already below the best aligned length:
			 * nothing further left can possibly win, stop.
			 */
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(fbno, flen, args->alignment,
				args->minlen, &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		/*
		 * Reposition the cursor on the winning record; the search
		 * above may have left it elsewhere (or off the tree).
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	xfs_alloc_fix_len(args);
	if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		TRACE_ALLOC("nominleft", args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO, 0, 0);
	/*
	 * Update both btrees to reflect carving [rbno, rbno+rlen) out of
	 * the found extent [fbno, fbno+flen).
	 */
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	/* Sanity: the allocation must lie inside the AG. */
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT),
		error0);
	TRACE_ALLOC("normal", args);
	return 0;

error0:
	TRACE_ALLOC("error", args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;
}
1345
1346 /*
1347 * Deal with the case where only small freespaces remain.
1348 * Either return the contents of the last freespace record,
1349 * or allocate space from the freelist if there is nothing in the tree.
1350 */
1351 STATIC int /* error */
1352 xfs_alloc_ag_vextent_small(
1353 xfs_alloc_arg_t *args, /* allocation argument structure */
1354 xfs_btree_cur_t *ccur, /* by-size cursor */
1355 xfs_agblock_t *fbnop, /* result block number */
1356 xfs_extlen_t *flenp, /* result length */
1357 int *stat) /* status: 0-freelist, 1-normal/none */
1358 {
1359 int error;
1360 xfs_agblock_t fbno;
1361 xfs_extlen_t flen;
1362 #ifdef XFS_ALLOC_TRACE
1363 static char fname[] = "xfs_alloc_ag_vextent_small";
1364 #endif
1365 int i;
1366
1367 if ((error = xfs_alloc_decrement(ccur, 0, &i)))
1368 goto error0;
1369 if (i) {
1370 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1371 goto error0;
1372 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1373 }
1374 /*
1375 * Nothing in the btree, try the freelist. Make sure
1376 * to respect minleft even when pulling from the
1377 * freelist.
1378 */
1379 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1380 (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount,
1381 ARCH_CONVERT) > args->minleft)) {
1382 if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno)))
1383 goto error0;
1384 if (fbno != NULLAGBLOCK) {
1385 if (args->userdata) {
1386 xfs_buf_t *bp;
1387
1388 bp = xfs_btree_get_bufs(args->mp, args->tp,
1389 args->agno, fbno, 0);
1390 xfs_trans_binval(args->tp, bp);
1391 }
1392 args->len = 1;
1393 args->agbno = fbno;
1394 XFS_WANT_CORRUPTED_GOTO(
1395 args->agbno + args->len <=
1396 INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
1397 ARCH_CONVERT),
1398 error0);
1399 args->wasfromfl = 1;
1400 TRACE_ALLOC("freelist", args);
1401 *stat = 0;
1402 return 0;
1403 }
1404 /*
1405 * Nothing in the freelist.
1406 */
1407 else
1408 flen = 0;
1409 }
1410 /*
1411 * Can't allocate from the freelist for some reason.
1412 */
1413 else
1414 flen = 0;
1415 /*
1416 * Can't do the allocation, give up.
1417 */
1418 if (flen < args->minlen) {
1419 args->agbno = NULLAGBLOCK;
1420 TRACE_ALLOC("notenough", args);
1421 flen = 0;
1422 }
1423 *fbnop = fbno;
1424 *flenp = flen;
1425 *stat = 1;
1426 TRACE_ALLOC("normal", args);
1427 return 0;
1428
1429 error0:
1430 TRACE_ALLOC("error", args);
1431 return error;
1432 }
1433
/*
 * Free the extent starting at agno/bno for length.
 * The freed space is merged with any contiguous free neighbors in the
 * by-block btree, and the by-size btree is updated to match.
 */
STATIC int			/* error */
xfs_free_ag_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	bno,	/* starting block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl)	/* set if is freelist blocks - no sb acctg */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_free_ag_extent";
#endif
	xfs_agblock_t	gtbno;	/* start of right neighbor block */
	xfs_extlen_t	gtlen;	/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;	/* start of left neighbor block */
	xfs_extlen_t	ltlen;	/* length of left neighbor block */
	xfs_mount_t	*mp;	/* mount point struct for filesystem */
	xfs_agblock_t	nbno;	/* new starting block of freespace */
	xfs_extlen_t	nlen;	/* new length of freespace */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, 0,
		0);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, 0,
		0);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 * (bno_cur still points at the right neighbor from the
		 * increment above.)
		 */
		if ((error = xfs_alloc_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_alloc_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 * The lookup must fail (i == 0): a duplicate record would mean the
	 * trees are already corrupt.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_alloc_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;
	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	{
		xfs_agf_t	*agf;
		xfs_perag_t	*pag;		/* per allocation group data */

		agf = XFS_BUF_TO_AGF(agbp);
		pag = &mp->m_perag[agno];
		INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len);
		xfs_trans_agblocks_delta(tp, len);
		pag->pagf_freeblks += len;
		XFS_WANT_CORRUPTED_GOTO(
			INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT),
			error0);
		TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
		/* Freelist blocks are not counted in the superblock. */
		if (!isfl)
			xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
		XFS_STATS_INC(xfsstats.xs_freex);
		XFS_STATS_ADD(xfsstats.xs_freeb, len);
	}
	TRACE_FREE(haveleft ?
			(haveright ? "both" : "left") :
			(haveright ? "right" : "none"),
		agno, bno, len, isfl);

	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a per-ag
	 * basis and each transaction records which entries should be removed
	 * when the iclog commits to disk.  If a busy block is allocated,
	 * the iclog is pushed up to the LSN that freed the block.
	 */
	xfs_alloc_mark_busy(tp, agno, bno, len);
	return 0;

error0:
	TRACE_FREE("error", agno, bno, len, isfl);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
1712
1713 /*
1714 * Visible (exported) allocation/free functions.
1715 * Some of these are used just by xfs_alloc_btree.c and this file.
1716 */
1717
1718 /*
1719 * Compute and fill in value of m_ag_maxlevels.
1720 */
1721 void
1722 xfs_alloc_compute_maxlevels(
1723 xfs_mount_t *mp) /* file system mount structure */
1724 {
1725 int level;
1726 uint maxblocks;
1727 uint maxleafents;
1728 int minleafrecs;
1729 int minnoderecs;
1730
1731 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1732 minleafrecs = mp->m_alloc_mnr[0];
1733 minnoderecs = mp->m_alloc_mnr[1];
1734 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1735 for (level = 1; maxblocks > 1; level++)
1736 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1737 mp->m_ag_maxlevels = level;
1738 }
1739
/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 * This is external so mkfs can call it, too.
 */
int				/* error */
xfs_alloc_fix_freelist(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t	*agbp;	/* agf buffer pointer */
	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
	xfs_buf_t	*agflbp;/* agfl buffer pointer */
	xfs_agblock_t	bno;	/* freelist block */
	xfs_extlen_t	delta;	/* new blocks needed in freelist */
	int		error;	/* error result code */
	xfs_extlen_t	longest;/* longest extent in allocation group */
	xfs_mount_t	*mp;	/* file system mount point structure */
	xfs_extlen_t	need;	/* total blocks needed in freelist */
	xfs_perag_t	*pag;	/* per-ag information structure */
	xfs_alloc_arg_t	targs;	/* local allocation arguments */
	xfs_trans_t	*tp;	/* transaction pointer */

	mp = args->mp;

	pag = args->pag;
	tp = args->tp;
	if (!pag->pagf_init) {
		/*
		 * Reading the AGF initializes the per-ag data as a side
		 * effect.  If it's still uninitialized afterwards the read
		 * was skipped (per the flags), so this AG can't be used.
		 */
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (!pag->pagf_init) {
			args->agbp = NULL;
			return 0;
		}
	} else
		agbp = NULL;

	/*
	 * If this is a metadata-preferred pag and we are user data
	 * then try somewhere else if we are not being asked to
	 * try harder at this point.
	 */
	if (pag->pagf_metadata && args->userdata && flags) {
		args->agbp = NULL;
		return 0;
	}

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
	/*
	 * If it looks like there isn't a long enough extent, or enough
	 * total blocks, reject it.  This first pass uses the cached
	 * per-ag values, so it's cheap; an exact check against the AGF
	 * follows once the buffer is locked.
	 */
	longest = (pag->pagf_longest > delta) ?
		(pag->pagf_longest - delta) :
		(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
	    (args->minleft &&
	     (int)(pag->pagf_freeblks + pag->pagf_flcount -
		   need - args->total) <
	     (int)args->minleft)) {
		if (agbp)
			xfs_trans_brelse(tp, agbp);
		args->agbp = NULL;
		return 0;
	}
	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (agbp == NULL) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (agbp == NULL) {
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Figure out how many blocks we should have in the freelist.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ?
		(need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0;
	/*
	 * If there isn't enough total or single-extent, reject it.
	 * Same test as above, but now against the authoritative
	 * on-disk AGF values.
	 */
	longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
	longest = (longest > delta) ? (longest - delta) :
		(INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0);
	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
	    (args->minleft &&
	     (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) +
		   INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) <
	     (int)args->minleft)) {
		xfs_trans_brelse(tp, agbp);
		args->agbp = NULL;
		return 0;
	}
	/*
	 * Make the freelist shorter if it's too long.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) {
		xfs_buf_t	*bp;

		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
			return error;
		/* isfl == 1: freelist blocks don't hit superblock counters */
		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
			return error;
		/*
		 * Invalidate the buffer for the freed block so its old
		 * contents are never written back.
		 */
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Initialize the args structure for the freelist-refill
	 * allocations: single blocks, no alignment, this AG only.
	 */
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
		targs.minalignslop = 0;
	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
		return error;
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) {
		targs.agbno = 0;
		targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT);
		/*
		 * Allocate as many blocks as possible at once.
		 */
		if ((error = xfs_alloc_ag_vextent(&targs)))
			return error;
		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK)
			break;
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp,
					bno)))
				return error;
		}
	}
	args->agbp = agbp;
	return 0;
}
1898
/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 * Sets *bnop to NULLAGBLOCK (without error) if the freelist is empty.
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop)	/* block address retrieved from freelist */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	int		error;
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_get_freelist";
#endif
	xfs_mount_t	*mp;	/* mount structure */
	xfs_perag_t	*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	/*
	 * Freelist is empty, give up.
	 */
	if (INT_ISZERO(agf->agf_flcount, ARCH_CONVERT)) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	mp = tp->t_mountp;
	if ((error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Get the block number and update the data structures.
	 * The freelist is a circular array: flfirst wraps back to 0
	 * when it reaches XFS_AGFL_SIZE.
	 */
	bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT);
	INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1);
	xfs_trans_brelse(tp, agflbp);
	if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE)
		INT_ZERO(agf->agf_flfirst, ARCH_CONVERT);
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
	*bnop = bno;

	/*
	 * As blocks are freed, they are added to the per-ag busy list
	 * and remain there until the freeing transaction is committed to
	 * disk.  Now that we have allocated blocks, this list must be
	 * searched to see if a block is being reused.  If one is, then
	 * the freeing transaction must be pushed to disk NOW by forcing
	 * to disk all iclogs up that transaction's LSN.
	 */
	xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
	return 0;
}
1963
1964 /*
1965 * Log the given fields from the agf structure.
1966 */
1967 void
1968 xfs_alloc_log_agf(
1969 xfs_trans_t *tp, /* transaction pointer */
1970 xfs_buf_t *bp, /* buffer for a.g. freelist header */
1971 int fields) /* mask of fields to be logged (XFS_AGF_...) */
1972 {
1973 int first; /* first byte offset */
1974 int last; /* last byte offset */
1975 static const short offsets[] = {
1976 offsetof(xfs_agf_t, agf_magicnum),
1977 offsetof(xfs_agf_t, agf_versionnum),
1978 offsetof(xfs_agf_t, agf_seqno),
1979 offsetof(xfs_agf_t, agf_length),
1980 offsetof(xfs_agf_t, agf_roots[0]),
1981 offsetof(xfs_agf_t, agf_levels[0]),
1982 offsetof(xfs_agf_t, agf_flfirst),
1983 offsetof(xfs_agf_t, agf_fllast),
1984 offsetof(xfs_agf_t, agf_flcount),
1985 offsetof(xfs_agf_t, agf_freeblks),
1986 offsetof(xfs_agf_t, agf_longest),
1987 sizeof(xfs_agf_t)
1988 };
1989
1990 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
1991 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
1992 }
1993
1994 /*
1995 * Interface for inode allocation to force the pag data to be initialized.
1996 */
1997 int /* error */
1998 xfs_alloc_pagf_init(
1999 xfs_mount_t *mp, /* file system mount structure */
2000 xfs_trans_t *tp, /* transaction pointer */
2001 xfs_agnumber_t agno, /* allocation group number */
2002 int flags) /* XFS_ALLOC_FLAGS_... */
2003 {
2004 xfs_buf_t *bp;
2005 int error;
2006
2007 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2008 return error;
2009 if (bp)
2010 xfs_trans_brelse(tp, bp);
2011 return 0;
2012 }
2013
/*
 * Put the block on the freelist for the allocation group.
 */
int					/* error */
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
	xfs_agblock_t		bno)	/* block being freed */
{
	xfs_agf_t		*agf;	/* a.g. freespace structure */
	xfs_agfl_t		*agfl;	/* a.g. free block array */
	xfs_agblock_t		*blockp;/* pointer to array entry */
	int			error;
#ifdef XFS_ALLOC_TRACE
	static char		fname[] = "xfs_alloc_put_freelist";
#endif
	xfs_mount_t		*mp;	/* mount structure */
	xfs_perag_t		*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	/* Caller may pass the AGFL buffer in; read it only if not. */
	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Advance fllast; the freelist is a circular array, so it wraps
	 * back to 0 at XFS_AGFL_SIZE.
	 */
	INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1);
	if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE)
		INT_ZERO(agf->agf_fllast, ARCH_CONVERT);
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;
	ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE);
	/*
	 * Store the block in the new tail slot, then log just that one
	 * array entry of the AGFL buffer along with the AGF fields.
	 */
	blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)];
	INT_SET(*blockp, ARCH_CONVERT, bno);
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
	xfs_trans_log_buf(tp, agflbp,
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
			sizeof(xfs_agblock_t) - 1));
	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a per-ag
	 * basis and each transaction records which entries should be removed
	 * when the iclog commits to disk.  If a busy block is allocated,
	 * the iclog is pushed up to the LSN that freed the block.
	 */
	xfs_alloc_mark_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);

	return 0;
}
2072
/*
 * Read in the allocation group header (free/alloc section).
 *
 * Reads the AGF block for @agno, validates its magic/version/freelist
 * fields, and on first use initializes the in-core per-ag freespace
 * data (counters, btree levels, busy-list).  On success *bpp holds the
 * AGF buffer (NULL if TRYLOCK was requested and the lock was busy).
 * Returns EFSCORRUPTED if the AGF fails validation.
 */
int					/* error */
xfs_alloc_read_agf(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	int		flags,		/* XFS_ALLOC_FLAG_... */
	xfs_buf_t	**bpp)		/* buffer for the ag freelist header */
{
	xfs_agf_t	*agf;		/* ag freelist header */
	int		agf_ok;		/* set if agf is consistent */
	xfs_buf_t	*bp;		/* return value */
	xfs_daddr_t	d;		/* disk block address */
	int		error;
	xfs_perag_t	*pag;		/* per allocation group data */

	ASSERT(agno != NULLAGNUMBER);
	d = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR);
	if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, 1,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U,
			&bp)))
		return error;
	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
	/* TRYLOCK miss: no error, but no buffer either. */
	if (!bp) {
		*bpp = NULL;
		return 0;
	}
	/*
	 * Validate the magic number of the agf block.
	 */
	agf = XFS_BUF_TO_AGF(bp);
	agf_ok =
		INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC &&
		XFS_AGF_GOOD_VERSION(INT_GET(agf->agf_versionnum, ARCH_CONVERT)) &&
		INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
				INT_GET(agf->agf_length, ARCH_CONVERT) &&
		INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE &&
		INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE &&
		INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE;
	if (XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF)) {
		xfs_trans_brelse(tp, bp);
#ifdef __KERNEL__	/* additional, temporary, debugging code */
		/* Report exactly which validation check failed. */
		cmn_err(CE_NOTE,
			"xfs_alloc_read_agf: error in <%s> AG %d",
			mp->m_fsname, agno);
		if (INT_GET(agf->agf_magicnum, ARCH_CONVERT) != XFS_AGF_MAGIC)
			cmn_err(CE_NOTE, "bad agf_magicnum 0x%x",
				INT_GET(agf->agf_magicnum, ARCH_CONVERT));
		if (!XFS_AGF_GOOD_VERSION(INT_GET(agf->agf_versionnum, ARCH_CONVERT)))
			cmn_err(CE_NOTE, "Bad version number 0x%x",
				INT_GET(agf->agf_versionnum, ARCH_CONVERT));
		if (!(INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
				INT_GET(agf->agf_length, ARCH_CONVERT)))
			cmn_err(CE_NOTE, "Bad freeblks %d %d",
				INT_GET(agf->agf_freeblks, ARCH_CONVERT),
				INT_GET(agf->agf_length, ARCH_CONVERT));
		if (!(INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE))
			cmn_err(CE_NOTE, "Bad flfirst %d",
				INT_GET(agf->agf_flfirst, ARCH_CONVERT));
		if (!(INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE))
			cmn_err(CE_NOTE, "Bad fllast %d",
				INT_GET(agf->agf_fllast, ARCH_CONVERT));
		if (!(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE))
			cmn_err(CE_NOTE, "Bad flcount %d",
				INT_GET(agf->agf_flcount, ARCH_CONVERT));
#endif
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * First read of this AG: seed the in-core per-ag freespace data
	 * from the on-disk AGF and set up the busy-block tracking.
	 */
	pag = &mp->m_perag[agno];
	if (!pag->pagf_init) {
		pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT);
		pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT);
		pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT);
		spinlock_init(&pag->pagb_lock, "xfspagb");
		pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
					sizeof(xfs_perag_busy_t), KM_SLEEP);
		pag->pagf_init = 1;
	}
#ifdef DEBUG
	/* On later reads, cross-check that in-core and on-disk agree. */
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT));
		ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT));
		ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
			INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
			INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT));
	}
#endif
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
	*bpp = bp;
	return 0;
}
2173
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 *
 * On success args->fsbno holds the allocated extent's start block, or
 * NULLFSBLOCK if no space could be found.  Returns 0 even when the
 * allocation fails (the caller checks fsbno); non-zero only for real
 * errors from the lower layers.
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_vextent";
#endif
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;	/* advance m_agfrotor when done */
	int		no_min = 0;	/* final pass: ignore minleft */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	/* Sanity-check the arguments; bad args yield fsbno == NULLFSBLOCK. */
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		TRACE_ALLOC("badargs", args);
		return 0;
	}
	minleft = args->minleft;

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		down_read(&mp->m_peraglock);
		args->pag = &mp->m_perag[args->agno];
		/* minleft is zeroed around fix_freelist, then restored. */
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			TRACE_ALLOC("nofix", args);
			goto error0;
		}
		if (!args->agbp) {
			up_read(&mp->m_peraglock);
			TRACE_ALLOC("noagbp", args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		up_read(&mp->m_peraglock);
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			/* Spread initial user data via the AG rotor. */
			args->fsbno = XFS_AGB_TO_FSB(mp, mp->m_agfrotor, 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = mp->m_agfrotor;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		down_read(&mp->m_peraglock);
		for (;;) {
			args->pag = &mp->m_perag[args->agno];
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				TRACE_ALLOC("nofix", args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}
			TRACE_ALLOC("loopfailed", args);
			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			if (++(args->agno) == mp->m_sb.sb_agcount)
				args->agno = 0;
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					/* Third full pass failed too: give up. */
					args->agbno = NULLAGBLOCK;
					TRACE_ALLOC("allfailed", args);
					break;
				}
				if (flags == 0) {
					/* Second pass done; retry ignoring minleft. */
					no_min = 1;
				} else {
					/* First pass done; drop trylock, reset type. */
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
		}
		up_read(&mp->m_peraglock);
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG))
			mp->m_agfrotor = (args->agno + 1) % mp->m_sb.sb_agcount;
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif
	}
	return 0;
error0:
	up_read(&mp->m_peraglock);
	return error;
}
2372
2373 /*
2374 * Free an extent.
2375 * Just break up the extent address and hand off to xfs_free_ag_extent
2376 * after fixing up the freelist.
2377 */
2378 int /* error */
2379 xfs_free_extent(
2380 xfs_trans_t *tp, /* transaction pointer */
2381 xfs_fsblock_t bno, /* starting block number of extent */
2382 xfs_extlen_t len) /* length of extent */
2383 {
2384 #ifdef DEBUG
2385 xfs_agf_t *agf; /* a.g. freespace header */
2386 #endif
2387 xfs_alloc_arg_t args; /* allocation argument structure */
2388 int error;
2389
2390 ASSERT(len != 0);
2391 args.tp = tp;
2392 args.mp = tp->t_mountp;
2393 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2394 ASSERT(args.agno < args.mp->m_sb.sb_agcount);
2395 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2396 args.alignment = 1;
2397 args.minlen = args.minleft = args.minalignslop = 0;
2398 down_read(&args.mp->m_peraglock);
2399 args.pag = &args.mp->m_perag[args.agno];
2400 if ((error = xfs_alloc_fix_freelist(&args, 0)))
2401 goto error0;
2402 #ifdef DEBUG
2403 ASSERT(args.agbp != NULL);
2404 agf = XFS_BUF_TO_AGF(args.agbp);
2405 ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
2406 #endif
2407 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
2408 len, 0);
2409 error0:
2410 up_read(&args.mp->m_peraglock);
2411 return error;
2412 }