]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - libxfs/xfs_alloc.c
Update copyright/license notices to match SGI legal prefered boilerplate.
[thirdparty/xfsprogs-dev.git] / libxfs / xfs_alloc.c
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <xfs.h>
20
/*
 * Absolute difference of two block numbers or lengths.
 * NOTE: both macro arguments are evaluated more than once, so the
 * arguments must be free of side effects.
 */
#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
/*
 * Flags for xfs_alloc_fixup_trees: the by-block (BNO) or by-size (CNT)
 * cursor is already positioned at the free extent record, so the
 * corresponding lookup can be skipped.
 */
#define XFSA_FIXUP_BNO_OK	1
#define XFSA_FIXUP_CNT_OK	2
24
25 /*
26 * Compute aligned version of the found extent.
27 * Takes alignment and min length into account.
28 */
29 STATIC int /* success (>= minlen) */
30 xfs_alloc_compute_aligned(
31 xfs_agblock_t foundbno, /* starting block in found extent */
32 xfs_extlen_t foundlen, /* length in found extent */
33 xfs_extlen_t alignment, /* alignment for allocation */
34 xfs_extlen_t minlen, /* minimum length for allocation */
35 xfs_agblock_t *resbno, /* result block number */
36 xfs_extlen_t *reslen) /* result length */
37 {
38 xfs_agblock_t bno;
39 xfs_extlen_t diff;
40 xfs_extlen_t len;
41
42 if (alignment > 1 && foundlen >= minlen) {
43 bno = roundup(foundbno, alignment);
44 diff = bno - foundbno;
45 len = diff >= foundlen ? 0 : foundlen - diff;
46 } else {
47 bno = foundbno;
48 len = foundlen;
49 }
50 *resbno = bno;
51 *reslen = len;
52 return len >= minlen;
53 }
54
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 * Returns the absolute distance between the chosen start block and
 * wantbno (0 also when no usable start exists; *newbnop is then
 * NULLAGBLOCK, which callers must check).
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		/*
		 * Free extent starts at or past the target: the best
		 * start is the free extent's own start, rounded up for
		 * alignment; fail if rounding pushes it off the end.
		 */
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		/*
		 * Free extent covers the whole target but we need
		 * alignment: consider the aligned block at/above wantbno
		 * (newbno1) and the aligned block just below it (newbno2),
		 * then keep whichever yields the longer usable length;
		 * on a tie, the one closer to wantbno.
		 */
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		/*
		 * Free extent covers the target and alignment is trivial:
		 * the target start itself is usable -- a perfect fit.
		 */
		newbno1 = wantbno;
	} else if (alignment > 1) {
		/*
		 * Free extent ends before the target's end: take the
		 * last aligned start that still fits wantlen, backing up
		 * one alignment unit if rounding up pushed us past the
		 * point where wantlen fits.
		 */
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		/* No alignment: the latest start that still fits wantlen. */
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
114
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 * Adjusts args->len downward in place; never increases it.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;		/* current remainder of len mod prod */
	xfs_extlen_t	rlen;		/* working copy of the length */

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	/*
	 * Nothing to do if prod is trivial, len is already too small to
	 * reach the first k*prod+mod boundary, or len is pinned at maxlen.
	 */
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;		/* already of the form k * prod + mod */
	/*
	 * Shrink rlen down to the nearest k*prod+mod value below it.
	 * The (int) casts make a subtraction that wrapped below zero
	 * (rlen is unsigned) compare as negative, so an underflowed
	 * result is rejected rather than treated as a huge length.
	 */
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}
150
151 /*
152 * Fix up length if there is too little space left in the a.g.
153 * Return 1 if ok, 0 if too little, should give up.
154 */
155 STATIC int
156 xfs_alloc_fix_minleft(
157 xfs_alloc_arg_t *args) /* allocation argument structure */
158 {
159 xfs_agf_t *agf; /* a.g. freelist header */
160 int diff; /* free space difference */
161
162 if (args->minleft == 0)
163 return 1;
164 agf = XFS_BUF_TO_AGF(args->agbp);
165 diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT)
166 + INT_GET(agf->agf_flcount, ARCH_CONVERT)
167 - args->len - args->minleft;
168 if (diff >= 0)
169 return 1;
170 args->len += diff; /* shrink the allocated space */
171 if (args->len >= args->minlen)
172 return 1;
173 args->agbno = NULLAGBLOCK;
174 return 0;
175 }
176
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 * Both trees must end up describing the same remaining free space:
 * zero, one, or two leftover extents depending on where the allocated
 * range sits inside the original free extent.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 * If the caller says the cursor is already positioned, only a
	 * DEBUG build re-reads the record to verify that claim.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
#ifdef DEBUG
	/*
	 * Cross-check: when both trees are single-level, their leaf
	 * blocks must hold the same number of records, since the two
	 * trees index the same set of free extents.
	 */
	{
		xfs_alloc_block_t	*bnoblock;
		xfs_alloc_block_t	*cntblock;

		if (bno_cur->bc_nlevels == 1 &&
		    cnt_cur->bc_nlevels == 1) {
			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
			XFS_WANT_CORRUPTED_RETURN(
				INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT));
		}
	}
#endif
	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;	/* exact match: nothing left */
	else if (rbno == fbno) {
		/* allocated from the front: remainder at the tail */
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		/* allocated from the tail: remainder at the front */
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		/* allocated from the middle: remainders on both sides */
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_alloc_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 * Each lookup must fail (i == 0) -- a hit would mean a duplicate
	 * free extent record, i.e. corruption.
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_alloc_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_alloc_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
321
322 /*
323 * Read in the allocation group free block array.
324 */
325 STATIC int /* error */
326 xfs_alloc_read_agfl(
327 xfs_mount_t *mp, /* mount point structure */
328 xfs_trans_t *tp, /* transaction pointer */
329 xfs_agnumber_t agno, /* allocation group number */
330 xfs_buf_t **bpp) /* buffer for the ag free block array */
331 {
332 xfs_buf_t *bp; /* return value */
333 int error;
334
335 ASSERT(agno != NULLAGNUMBER);
336 error = xfs_trans_read_buf(
337 mp, tp, mp->m_ddev_targp,
338 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
339 XFS_FSS_TO_BB(mp, 1), 0, &bp);
340 if (error)
341 return error;
342 ASSERT(bp);
343 ASSERT(!XFS_BUF_GETERROR(bp));
344 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
345 *bpp = bp;
346 return 0;
347 }
348
#if defined(XFS_ALLOC_TRACE)
/*
 * Add an allocation trace entry for an alloc call.
 * Packs the interesting fields of *args into the 16 slots of a
 * ktrace entry.
 */
STATIC void
xfs_alloc_trace_alloc(
	char		*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		/* entry type in the low 16 bits, call-site line above it */
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)args->mp,
		(void *)(__psunsigned_t)args->agno,
		(void *)(__psunsigned_t)args->agbno,
		(void *)(__psunsigned_t)args->minlen,
		(void *)(__psunsigned_t)args->maxlen,
		(void *)(__psunsigned_t)args->mod,
		(void *)(__psunsigned_t)args->prod,
		(void *)(__psunsigned_t)args->minleft,
		(void *)(__psunsigned_t)args->total,
		(void *)(__psunsigned_t)args->alignment,
		(void *)(__psunsigned_t)args->len,
		/* type/otype share one slot; the four flags share another */
		(void *)((((__psint_t)args->type) << 16) |
			 (__psint_t)args->otype),
		(void *)(__psint_t)((args->wasdel << 3) |
				    (args->wasfromfl << 2) |
				    (args->isfl << 1) |
				    (args->userdata << 0)));
}
382
/*
 * Add an allocation trace entry for a free call.
 * Only eight of the sixteen ktrace slots are meaningful here; the
 * rest are padded with NULL.
 */
STATIC void
xfs_alloc_trace_free(
	char		*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* a.g. relative block number */
	xfs_extlen_t	len,		/* length of extent */
	int		isfl,		/* set if is freelist allocation/free */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		/* entry type in the low 16 bits, call-site line above it */
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psunsigned_t)agno,
		(void *)(__psunsigned_t)agbno,
		(void *)(__psunsigned_t)len,
		(void *)(__psint_t)isfl,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
408
/*
 * Add an allocation trace entry for modifying an agf.
 * Records the logging flags plus a snapshot of every on-disk AGF
 * field (converted from disk endianness via INT_GET).
 */
STATIC void
xfs_alloc_trace_modagf(
	char		*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agf_t	*agf,		/* new agf value */
	int		flags,		/* logging flags for agf */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		/* entry type in the low 16 bits, call-site line above it */
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psint_t)flags,
		(void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT));
}
#endif	/* XFS_ALLOC_TRACE */
444
445 /*
446 * Allocation group level functions.
447 */
448
/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 * Dispatches to the per-type worker, then applies AGF/superblock
 * accounting if the allocation succeeded.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent";
#endif

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (error)
		return error;
	/*
	 * If the allocation worked, need to change the agf structure
	 * (and log it), and the superblock.
	 */
	if (args->agbno != NULLAGBLOCK) {
		xfs_agf_t	*agf;	/* allocation group freelist header */
#ifdef XFS_ALLOC_TRACE
		xfs_mount_t	*mp = args->mp;
#endif
		long		slen = (long)args->len;

		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
		ASSERT(!(args->wasfromfl) || !args->isfl);
		ASSERT(args->agbno % args->alignment == 0);
		if (!(args->wasfromfl)) {
			/*
			 * Blocks came from the free-space btrees, not the
			 * freelist: debit the AGF free-block count (both
			 * on disk and in the in-core per-ag structure),
			 * log the change, and scan the busy list for the
			 * blocks just allocated.
			 */
			agf = XFS_BUF_TO_AGF(args->agbp);
			INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT));
			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
			/* search the busylist for these blocks */
			xfs_alloc_search_busy(args->tp, args->agno,
					args->agbno, args->len);
		}
		/*
		 * Freelist allocations don't touch the superblock free
		 * block counters; others debit the (possibly reserved)
		 * fdblocks count.
		 */
		if (!args->isfl)
			xfs_trans_mod_sb(args->tp,
				args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
					XFS_TRANS_SB_FDBLOCKS, -slen);
		XFS_STATS_INC(xs_allocx);
		XFS_STATS_ADD(xs_allocb, args->len);
	}
	return 0;
}
530
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 * A NULLAGBLOCK result with a 0 return is "no space at that exact
 * block", which is not an error.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent_exact";
#endif
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	/* An exact-bno request can't also carry an alignment constraint. */
	ASSERT(args->alignment == 1);
	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO, NULL, 0);
	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find it, return null.
		 */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Grab the freespace record.
	 */
	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;
	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	/*
	 * Fix the length according to mod and prod if given.
	 */
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		/* Not enough space left in the a.g., bail out cleanly. */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;
	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, NULL, 0);
	ASSERT(args->agbno + args->len <=
		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT));
	/*
	 * Remove the allocated range from both freespace btrees; the
	 * by-block cursor already points at the record (BNO_OK).
	 */
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	TRACE_ALLOC("normal", args);
	args->wasfromfl = 0;
	return 0;

error0:
	/* bno_cur is always live here; cnt_cur is torn down at its site. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	TRACE_ALLOC("error", args);
	return error;
}
637
638 /*
639 * Allocate a variable extent near bno in the allocation group agno.
640 * Extent's length (returned in len) will be between minlen and maxlen,
641 * and of the form k * prod + mod unless there's nothing that large.
642 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
643 */
644 STATIC int /* error */
645 xfs_alloc_ag_vextent_near(
646 xfs_alloc_arg_t *args) /* allocation argument structure */
647 {
648 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
649 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
650 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
651 #ifdef XFS_ALLOC_TRACE
652 static char fname[] = "xfs_alloc_ag_vextent_near";
653 #endif
654 xfs_agblock_t gtbno; /* start bno of right side entry */
655 xfs_agblock_t gtbnoa; /* aligned ... */
656 xfs_extlen_t gtdiff; /* difference to right side entry */
657 xfs_extlen_t gtlen; /* length of right side entry */
658 xfs_extlen_t gtlena; /* aligned ... */
659 xfs_agblock_t gtnew; /* useful start bno of right side */
660 int error; /* error code */
661 int i; /* result code, temporary */
662 int j; /* result code, temporary */
663 xfs_agblock_t ltbno; /* start bno of left side entry */
664 xfs_agblock_t ltbnoa; /* aligned ... */
665 xfs_extlen_t ltdiff; /* difference to left side entry */
666 /*REFERENCED*/
667 xfs_agblock_t ltend; /* end bno of left side entry */
668 xfs_extlen_t ltlen; /* length of left side entry */
669 xfs_extlen_t ltlena; /* aligned ... */
670 xfs_agblock_t ltnew; /* useful start bno of left side */
671 xfs_extlen_t rlen; /* length of returned extent */
672 #if defined(DEBUG) && defined(__KERNEL__)
673 /*
674 * Randomly don't execute the first algorithm.
675 */
676 int dofirst; /* set to do first algorithm */
677
678 dofirst = random() & 1;
679 #endif
680 /*
681 * Get a cursor for the by-size btree.
682 */
683 cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
684 args->agno, XFS_BTNUM_CNT, NULL, 0);
685 ltlen = 0;
686 bno_cur_lt = bno_cur_gt = NULL;
687 /*
688 * See if there are any free extents as big as maxlen.
689 */
690 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
691 goto error0;
692 /*
693 * If none, then pick up the last entry in the tree unless the
694 * tree is empty.
695 */
696 if (!i) {
697 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
698 &ltlen, &i)))
699 goto error0;
700 if (i == 0 || ltlen == 0) {
701 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
702 return 0;
703 }
704 ASSERT(i == 1);
705 }
706 args->wasfromfl = 0;
707 /*
708 * First algorithm.
709 * If the requested extent is large wrt the freespaces available
710 * in this a.g., then the cursor will be pointing to a btree entry
711 * near the right edge of the tree. If it's in the last btree leaf
712 * block, then we just examine all the entries in that block
713 * that are big enough, and pick the best one.
714 * This is written as a while loop so we can break out of it,
715 * but we never loop back to the top.
716 */
717 while (xfs_btree_islastblock(cnt_cur, 0)) {
718 xfs_extlen_t bdiff;
719 int besti=0;
720 xfs_extlen_t blen=0;
721 xfs_agblock_t bnew=0;
722
723 #if defined(DEBUG) && defined(__KERNEL__)
724 if (!dofirst)
725 break;
726 #endif
727 /*
728 * Start from the entry that lookup found, sequence through
729 * all larger free blocks. If we're actually pointing at a
730 * record smaller than maxlen, go to the start of this block,
731 * and skip all those smaller than minlen.
732 */
733 if (ltlen || args->alignment > 1) {
734 cnt_cur->bc_ptrs[0] = 1;
735 do {
736 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
737 &ltlen, &i)))
738 goto error0;
739 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
740 if (ltlen >= args->minlen)
741 break;
742 if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
743 goto error0;
744 } while (i);
745 ASSERT(ltlen >= args->minlen);
746 if (!i)
747 break;
748 }
749 i = cnt_cur->bc_ptrs[0];
750 for (j = 1, blen = 0, bdiff = 0;
751 !error && j && (blen < args->maxlen || bdiff > 0);
752 error = xfs_alloc_increment(cnt_cur, 0, &j)) {
753 /*
754 * For each entry, decide if it's better than
755 * the previous best entry.
756 */
757 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
758 goto error0;
759 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
760 if (!xfs_alloc_compute_aligned(ltbno, ltlen,
761 args->alignment, args->minlen,
762 &ltbnoa, &ltlena))
763 continue;
764 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
765 xfs_alloc_fix_len(args);
766 ASSERT(args->len >= args->minlen);
767 if (args->len < blen)
768 continue;
769 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
770 args->alignment, ltbno, ltlen, &ltnew);
771 if (ltnew != NULLAGBLOCK &&
772 (args->len > blen || ltdiff < bdiff)) {
773 bdiff = ltdiff;
774 bnew = ltnew;
775 blen = args->len;
776 besti = cnt_cur->bc_ptrs[0];
777 }
778 }
779 /*
780 * It didn't work. We COULD be in a case where
781 * there's a good record somewhere, so try again.
782 */
783 if (blen == 0)
784 break;
785 /*
786 * Point at the best entry, and retrieve it again.
787 */
788 cnt_cur->bc_ptrs[0] = besti;
789 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
790 goto error0;
791 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
792 ltend = ltbno + ltlen;
793 ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
794 ARCH_CONVERT));
795 args->len = blen;
796 if (!xfs_alloc_fix_minleft(args)) {
797 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
798 TRACE_ALLOC("nominleft", args);
799 return 0;
800 }
801 blen = args->len;
802 /*
803 * We are allocating starting at bnew for blen blocks.
804 */
805 args->agbno = bnew;
806 ASSERT(bnew >= ltbno);
807 ASSERT(bnew + blen <= ltend);
808 /*
809 * Set up a cursor for the by-bno tree.
810 */
811 bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
812 args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
813 /*
814 * Fix up the btree entries.
815 */
816 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
817 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
818 goto error0;
819 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
820 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
821 TRACE_ALLOC("first", args);
822 return 0;
823 }
824 /*
825 * Second algorithm.
826 * Search in the by-bno tree to the left and to the right
827 * simultaneously, until in each case we find a space big enough,
828 * or run into the edge of the tree. When we run into the edge,
829 * we deallocate that cursor.
830 * If both searches succeed, we compare the two spaces and pick
831 * the better one.
832 * With alignment, it's possible for both to fail; the upper
833 * level algorithm that picks allocation groups for allocations
834 * is not supposed to do this.
835 */
836 /*
837 * Allocate and initialize the cursor for the leftward search.
838 */
839 bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
840 args->agno, XFS_BTNUM_BNO, NULL, 0);
841 /*
842 * Lookup <= bno to find the leftward search's starting point.
843 */
844 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
845 goto error0;
846 if (!i) {
847 /*
848 * Didn't find anything; use this cursor for the rightward
849 * search.
850 */
851 bno_cur_gt = bno_cur_lt;
852 bno_cur_lt = NULL;
853 }
854 /*
855 * Found something. Duplicate the cursor for the rightward search.
856 */
857 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
858 goto error0;
859 /*
860 * Increment the cursor, so we will point at the entry just right
861 * of the leftward entry if any, or to the leftmost entry.
862 */
863 if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
864 goto error0;
865 if (!i) {
866 /*
867 * It failed, there are no rightward entries.
868 */
869 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
870 bno_cur_gt = NULL;
871 }
872 /*
873 * Loop going left with the leftward cursor, right with the
874 * rightward cursor, until either both directions give up or
875 * we find an entry at least as big as minlen.
876 */
877 do {
878 if (bno_cur_lt) {
879 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
880 goto error0;
881 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
882 if (xfs_alloc_compute_aligned(ltbno, ltlen,
883 args->alignment, args->minlen,
884 &ltbnoa, &ltlena))
885 break;
886 if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
887 goto error0;
888 if (!i) {
889 xfs_btree_del_cursor(bno_cur_lt,
890 XFS_BTREE_NOERROR);
891 bno_cur_lt = NULL;
892 }
893 }
894 if (bno_cur_gt) {
895 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
896 goto error0;
897 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
898 if (xfs_alloc_compute_aligned(gtbno, gtlen,
899 args->alignment, args->minlen,
900 &gtbnoa, &gtlena))
901 break;
902 if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
903 goto error0;
904 if (!i) {
905 xfs_btree_del_cursor(bno_cur_gt,
906 XFS_BTREE_NOERROR);
907 bno_cur_gt = NULL;
908 }
909 }
910 } while (bno_cur_lt || bno_cur_gt);
911 /*
912 * Got both cursors still active, need to find better entry.
913 */
914 if (bno_cur_lt && bno_cur_gt) {
915 /*
916 * Left side is long enough, look for a right side entry.
917 */
918 if (ltlena >= args->minlen) {
919 /*
920 * Fix up the length.
921 */
922 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
923 xfs_alloc_fix_len(args);
924 rlen = args->len;
925 ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
926 args->alignment, ltbno, ltlen, &ltnew);
927 /*
928 * Not perfect.
929 */
930 if (ltdiff) {
931 /*
932 * Look until we find a better one, run out of
933 * space, or run off the end.
934 */
935 while (bno_cur_lt && bno_cur_gt) {
936 if ((error = xfs_alloc_get_rec(
937 bno_cur_gt, &gtbno,
938 &gtlen, &i)))
939 goto error0;
940 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
941 xfs_alloc_compute_aligned(gtbno, gtlen,
942 args->alignment, args->minlen,
943 &gtbnoa, &gtlena);
944 /*
945 * The left one is clearly better.
946 */
947 if (gtbnoa >= args->agbno + ltdiff) {
948 xfs_btree_del_cursor(
949 bno_cur_gt,
950 XFS_BTREE_NOERROR);
951 bno_cur_gt = NULL;
952 break;
953 }
954 /*
955 * If we reach a big enough entry,
956 * compare the two and pick the best.
957 */
958 if (gtlena >= args->minlen) {
959 args->len =
960 XFS_EXTLEN_MIN(gtlena,
961 args->maxlen);
962 xfs_alloc_fix_len(args);
963 rlen = args->len;
964 gtdiff = xfs_alloc_compute_diff(
965 args->agbno, rlen,
966 args->alignment,
967 gtbno, gtlen, &gtnew);
968 /*
969 * Right side is better.
970 */
971 if (gtdiff < ltdiff) {
972 xfs_btree_del_cursor(
973 bno_cur_lt,
974 XFS_BTREE_NOERROR);
975 bno_cur_lt = NULL;
976 }
977 /*
978 * Left side is better.
979 */
980 else {
981 xfs_btree_del_cursor(
982 bno_cur_gt,
983 XFS_BTREE_NOERROR);
984 bno_cur_gt = NULL;
985 }
986 break;
987 }
988 /*
989 * Fell off the right end.
990 */
991 if ((error = xfs_alloc_increment(
992 bno_cur_gt, 0, &i)))
993 goto error0;
994 if (!i) {
995 xfs_btree_del_cursor(
996 bno_cur_gt,
997 XFS_BTREE_NOERROR);
998 bno_cur_gt = NULL;
999 break;
1000 }
1001 }
1002 }
1003 /*
1004 * The left side is perfect, trash the right side.
1005 */
1006 else {
1007 xfs_btree_del_cursor(bno_cur_gt,
1008 XFS_BTREE_NOERROR);
1009 bno_cur_gt = NULL;
1010 }
1011 }
1012 /*
1013 * It's the right side that was found first, look left.
1014 */
1015 else {
1016 /*
1017 * Fix up the length.
1018 */
1019 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1020 xfs_alloc_fix_len(args);
1021 rlen = args->len;
1022 gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
1023 args->alignment, gtbno, gtlen, &gtnew);
1024 /*
1025 * Right side entry isn't perfect.
1026 */
1027 if (gtdiff) {
1028 /*
1029 * Look until we find a better one, run out of
1030 * space, or run off the end.
1031 */
1032 while (bno_cur_lt && bno_cur_gt) {
1033 if ((error = xfs_alloc_get_rec(
1034 bno_cur_lt, &ltbno,
1035 &ltlen, &i)))
1036 goto error0;
1037 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1038 xfs_alloc_compute_aligned(ltbno, ltlen,
1039 args->alignment, args->minlen,
1040 &ltbnoa, &ltlena);
1041 /*
1042 * The right one is clearly better.
1043 */
1044 if (ltbnoa <= args->agbno - gtdiff) {
1045 xfs_btree_del_cursor(
1046 bno_cur_lt,
1047 XFS_BTREE_NOERROR);
1048 bno_cur_lt = NULL;
1049 break;
1050 }
1051 /*
1052 * If we reach a big enough entry,
1053 * compare the two and pick the best.
1054 */
1055 if (ltlena >= args->minlen) {
1056 args->len = XFS_EXTLEN_MIN(
1057 ltlena, args->maxlen);
1058 xfs_alloc_fix_len(args);
1059 rlen = args->len;
1060 ltdiff = xfs_alloc_compute_diff(
1061 args->agbno, rlen,
1062 args->alignment,
1063 ltbno, ltlen, &ltnew);
1064 /*
1065 * Left side is better.
1066 */
1067 if (ltdiff < gtdiff) {
1068 xfs_btree_del_cursor(
1069 bno_cur_gt,
1070 XFS_BTREE_NOERROR);
1071 bno_cur_gt = NULL;
1072 }
1073 /*
1074 * Right side is better.
1075 */
1076 else {
1077 xfs_btree_del_cursor(
1078 bno_cur_lt,
1079 XFS_BTREE_NOERROR);
1080 bno_cur_lt = NULL;
1081 }
1082 break;
1083 }
1084 /*
1085 * Fell off the left end.
1086 */
1087 if ((error = xfs_alloc_decrement(
1088 bno_cur_lt, 0, &i)))
1089 goto error0;
1090 if (!i) {
1091 xfs_btree_del_cursor(bno_cur_lt,
1092 XFS_BTREE_NOERROR);
1093 bno_cur_lt = NULL;
1094 break;
1095 }
1096 }
1097 }
1098 /*
1099 * The right side is perfect, trash the left side.
1100 */
1101 else {
1102 xfs_btree_del_cursor(bno_cur_lt,
1103 XFS_BTREE_NOERROR);
1104 bno_cur_lt = NULL;
1105 }
1106 }
1107 }
1108 /*
1109 * If we couldn't get anything, give up.
1110 */
1111 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1112 TRACE_ALLOC("neither", args);
1113 args->agbno = NULLAGBLOCK;
1114 return 0;
1115 }
1116 /*
1117 * At this point we have selected a freespace entry, either to the
1118 * left or to the right. If it's on the right, copy all the
1119 * useful variables to the "left" set so we only have one
1120 * copy of this code.
1121 */
1122 if (bno_cur_gt) {
1123 bno_cur_lt = bno_cur_gt;
1124 bno_cur_gt = NULL;
1125 ltbno = gtbno;
1126 ltbnoa = gtbnoa;
1127 ltlen = gtlen;
1128 ltlena = gtlena;
1129 j = 1;
1130 } else
1131 j = 0;
1132 /*
1133 * Fix up the length and compute the useful address.
1134 */
1135 ltend = ltbno + ltlen;
1136 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1137 xfs_alloc_fix_len(args);
1138 if (!xfs_alloc_fix_minleft(args)) {
1139 TRACE_ALLOC("nominleft", args);
1140 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1141 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1142 return 0;
1143 }
1144 rlen = args->len;
1145 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
1146 ltlen, &ltnew);
1147 ASSERT(ltnew >= ltbno);
1148 ASSERT(ltnew + rlen <= ltend);
1149 ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
1150 ARCH_CONVERT));
1151 args->agbno = ltnew;
1152 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1153 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1154 goto error0;
1155 TRACE_ALLOC(j ? "gt" : "lt", args);
1156 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1157 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1158 return 0;
1159
1160 error0:
1161 TRACE_ALLOC("error", args);
1162 if (cnt_cur != NULL)
1163 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1164 if (bno_cur_lt != NULL)
1165 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1166 if (bno_cur_gt != NULL)
1167 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1168 return error;
1169 }
1170
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 *
 * Strategy: search the by-size (cnt) btree for the smallest entry that
 * can satisfy maxlen after alignment; failing that, take the largest
 * remaining entry (or fall back to the freelist via
 * xfs_alloc_ag_vextent_small) and search leftward for a better fit.
 * On success args->agbno/args->len describe the allocated extent and
 * both freespace btrees have been fixed up.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent_size";
#endif
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */

	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, NULL, 0);
	bno_cur = NULL;
	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 * The alignment-1 slop guarantees an aligned maxlen sub-extent
	 * fits inside whatever entry is found.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
				&flen, &i)))
			goto error0;
		if (i == 0 || flen == 0) {
			/* Nothing usable at all; report no allocation. */
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			TRACE_ALLOC("noentry", args);
			return 0;
		}
		ASSERT(i == 1);
	}
	/*
	 * There's a freespace as big as maxlen+alignment-1, get it.
	 */
	else {
		if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
		&rbno, &rlen);
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;	/* best start block found */
		xfs_extlen_t	bestflen;	/* length of best entry */
		xfs_agblock_t	bestrbno;	/* aligned start of best */
		xfs_extlen_t	bestrlen;	/* aligned length of best */

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		/*
		 * Walk left (toward shorter entries) looking for one whose
		 * aligned usable length beats the current best.
		 */
		for (;;) {
			if ((error = xfs_alloc_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			/*
			 * Entries only get shorter as we go left, so once
			 * raw length drops below the best aligned length,
			 * nothing further left can win.
			 */
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(fbno, flen, args->alignment,
				args->minlen, &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		/*
		 * Reposition the cursor on the winning record; the loop
		 * above may have moved it past it.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	/* This allocation came from the btrees, not the freelist. */
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	xfs_alloc_fix_len(args);
	if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		TRACE_ALLOC("nominleft", args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO, NULL, 0);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	/* Sanity check: the extent must lie inside the AG. */
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT),
		error0);
	TRACE_ALLOC("normal", args);
	return 0;

error0:
	TRACE_ALLOC("error", args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;
}
1327
/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 *
 * On return, *stat == 1 means either a record was found (*fbnop/*flenp
 * valid, *flenp possibly 0 if nothing usable) or nothing was available;
 * *stat == 0 means a single block was taken from the AG freelist and
 * args->agbno/args->len already describe it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;	/* error return value */
	xfs_agblock_t	fbno;	/* start of found freespace */
	xfs_extlen_t	flen;	/* length of found freespace */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_ag_vextent_small";
#endif
	int		i;	/* btree operation status */

	/*
	 * The caller's lookup landed past the last record; back up one
	 * to reach the largest remaining entry in the by-size tree.
	 */
	if ((error = xfs_alloc_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.  Only a plain single-block request (minlen and
	 * alignment both 1) that isn't itself refilling the freelist
	 * may take this path.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount,
			ARCH_CONVERT) > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			if (args->userdata) {
				xfs_buf_t	*bp;

				/*
				 * The block is going to user data; invalidate
				 * any transaction buffer covering it so stale
				 * metadata contents are never written back
				 * (see xfs_trans_binval).
				 */
				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			/* Sanity check: block must lie inside the AG. */
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
					ARCH_CONVERT),
				error0);
			args->wasfromfl = 1;
			TRACE_ALLOC("freelist", args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else
		flen = 0;
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		TRACE_ALLOC("notenough", args);
		flen = 0;
	}
	/*
	 * NOTE(review): on the paths that set flen = 0 above, fbno was
	 * never assigned, so *fbnop is indeterminate when *flenp == 0;
	 * callers must ignore it in that case -- confirm.
	 */
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	TRACE_ALLOC("normal", args);
	return 0;

error0:
	TRACE_ALLOC("error", args);
	return error;
}
1416
/*
 * Free the extent starting at agno/bno for length.
 *
 * Coalesces the freed extent with a contiguous left and/or right
 * neighbor record in the by-block btree, keeps the by-size btree in
 * sync, updates the AGF/per-ag/superblock free counts, and marks the
 * blocks busy so they can't be reused before the transaction commits.
 * Returns 0 or an error code from the btree/transaction layers.
 */
STATIC int			/* error */
xfs_free_ag_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	bno,	/* starting block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl)	/* set if is freelist blocks - no sb acctg */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_free_ag_extent";
#endif
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL,
		0);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL,
		0);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 * (bno_cur is still positioned on it from the lookup above.)
		 */
		if ((error = xfs_alloc_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_alloc_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_alloc_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 * The lookup must fail (i == 0): the record can't already exist.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_alloc_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;
	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	{
		xfs_agf_t	*agf;
		xfs_perag_t	*pag;		/* per allocation group data */

		agf = XFS_BUF_TO_AGF(agbp);
		pag = &mp->m_perag[agno];
		INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len);
		xfs_trans_agblocks_delta(tp, len);
		pag->pagf_freeblks += len;
		XFS_WANT_CORRUPTED_GOTO(
			INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT),
			error0);
		TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
		/* Freelist blocks aren't counted in sb fdblocks (see isfl). */
		if (!isfl)
			xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
		XFS_STATS_INC(xs_freex);
		XFS_STATS_ADD(xs_freeb, len);
	}
	TRACE_FREE(haveleft ?
			(haveright ? "both" : "left") :
			(haveright ? "right" : "none"),
		agno, bno, len, isfl);

	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a per-ag
	 * basis and each transaction records which entries should be removed
	 * when the iclog commits to disk.  If a busy block is allocated,
	 * the iclog is pushed up to the LSN that freed the block.
	 */
	xfs_alloc_mark_busy(tp, agno, bno, len);
	return 0;

error0:
	TRACE_FREE("error", agno, bno, len, isfl);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
1695
1696 /*
1697 * Visible (exported) allocation/free functions.
1698 * Some of these are used just by xfs_alloc_btree.c and this file.
1699 */
1700
1701 /*
1702 * Compute and fill in value of m_ag_maxlevels.
1703 */
1704 void
1705 xfs_alloc_compute_maxlevels(
1706 xfs_mount_t *mp) /* file system mount structure */
1707 {
1708 int level;
1709 uint maxblocks;
1710 uint maxleafents;
1711 int minleafrecs;
1712 int minnoderecs;
1713
1714 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1715 minleafrecs = mp->m_alloc_mnr[0];
1716 minnoderecs = mp->m_alloc_mnr[1];
1717 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1718 for (level = 1; maxblocks > 1; level++)
1719 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1720 mp->m_ag_maxlevels = level;
1721 }
1722
/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 * This is external so mkfs can call it, too.
 *
 * On success args->agbp holds the (locked) AGF buffer; args->agbp is
 * set to NULL when the AG was rejected (not enough space, metadata
 * preference, or trylock failure) without that being an error.
 */
int				/* error */
xfs_alloc_fix_freelist(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t	*agbp;	/* agf buffer pointer */
	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
	xfs_buf_t	*agflbp;/* agfl buffer pointer */
	xfs_agblock_t	bno;	/* freelist block */
	xfs_extlen_t	delta;	/* new blocks needed in freelist */
	int		error;	/* error result code */
	xfs_extlen_t	longest;/* longest extent in allocation group */
	xfs_mount_t	*mp;	/* file system mount point structure */
	xfs_extlen_t	need;	/* total blocks needed in freelist */
	xfs_perag_t	*pag;	/* per-ag information structure */
	xfs_alloc_arg_t	targs;	/* local allocation arguments */
	xfs_trans_t	*tp;	/* transaction pointer */

	mp = args->mp;

	pag = args->pag;
	tp = args->tp;
	if (!pag->pagf_init) {
		/* Reading the AGF initializes the per-ag data. */
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (!pag->pagf_init) {
			/* Trylock failed; caller should try another AG. */
			args->agbp = NULL;
			return 0;
		}
	} else
		agbp = NULL;

	/*
	 * If this is a metadata-preferred pag and we are allocating user
	 * data, then try somewhere else if we are not being asked to try
	 * harder at this point (flags != 0).
	 */
	if (pag->pagf_metadata && args->userdata && flags) {
		args->agbp = NULL;
		return 0;
	}

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
	/*
	 * If it looks like there isn't a long enough extent, or enough
	 * total blocks, reject it.  When the longest extent would be
	 * consumed refilling the freelist, the fallback value is the
	 * boolean "any free space at all" (0 or 1 blocks).
	 */
	longest = (pag->pagf_longest > delta) ?
		(pag->pagf_longest - delta) :
		(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
	    (args->minleft &&
	     (int)(pag->pagf_freeblks + pag->pagf_flcount -
		   need - args->total) <
	     (int)args->minleft)) {
		if (agbp)
			xfs_trans_brelse(tp, agbp);
		args->agbp = NULL;
		return 0;
	}
	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (agbp == NULL) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (agbp == NULL) {
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Figure out how many blocks we should have in the freelist.
	 * Re-derived from the on-disk AGF now that it is locked, since
	 * the per-ag numbers above were only an unlocked preview.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ?
		(need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0;
	/*
	 * If there isn't enough total or single-extent, reject it.
	 */
	longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
	longest = (longest > delta) ? (longest - delta) :
		(INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0);
	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
	    (args->minleft &&
	     (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) +
	      INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) <
	     (int)args->minleft)) {
		xfs_trans_brelse(tp, agbp);
		args->agbp = NULL;
		return 0;
	}
	/*
	 * Make the freelist shorter if it's too long.
	 * Each surplus block is freed back as a one-block extent
	 * (isfl set), and its buffer invalidated.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) {
		xfs_buf_t	*bp;

		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
			return error;
		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
			return error;
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Initialize the args structure.
	 *
	 * NOTE(review): targs.total is left uninitialized here; it
	 * appears unused on the XFS_ALLOCTYPE_THIS_AG path with
	 * minleft == 0 -- confirm.
	 */
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
		targs.minalignslop = 0;
	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
		return error;
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) {
		targs.agbno = 0;
		targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT);
		/*
		 * Allocate as many blocks as possible at once.
		 */
		if ((error = xfs_alloc_ag_vextent(&targs)))
			return error;
		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK)
			break;
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp,
					bno)))
				return error;
		}
	}
	args->agbp = agbp;
	return 0;
}
1881
/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 *
 * Pops the block at agf_flfirst off the circular AGFL array, updates
 * the AGF counters, and returns the block number in *bnop (NULLAGBLOCK
 * when the freelist is empty).
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop)	/* block address retrieved from freelist */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	int		error;	/* error return value */
#ifdef XFS_ALLOC_TRACE
	static char	fname[] = "xfs_alloc_get_freelist";
#endif
	xfs_mount_t	*mp;	/* mount structure */
	xfs_perag_t	*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	/*
	 * Freelist is empty, give up.
	 * (Raw on-disk field compared against zero; a zero test needs
	 * no byte-order conversion.)
	 */
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	mp = tp->t_mountp;
	if ((error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Get the block number and update the data structures.
	 */
	bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT);
	INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1);
	xfs_trans_brelse(tp, agflbp);
	/* Wrap around the circular freelist array. */
	if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
		agf->agf_flfirst = 0;
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
	*bnop = bno;

	/*
	 * As blocks are freed, they are added to the per-ag busy list
	 * and remain there until the freeing transaction is committed to
	 * disk.  Now that we have allocated blocks, this list must be
	 * searched to see if a block is being reused.  If one is, then
	 * the freeing transaction must be pushed to disk NOW by forcing
	 * to disk all iclogs up that transaction's LSN.
	 */
	xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
	return 0;
}
1946
1947 /*
1948 * Log the given fields from the agf structure.
1949 */
1950 void
1951 xfs_alloc_log_agf(
1952 xfs_trans_t *tp, /* transaction pointer */
1953 xfs_buf_t *bp, /* buffer for a.g. freelist header */
1954 int fields) /* mask of fields to be logged (XFS_AGF_...) */
1955 {
1956 int first; /* first byte offset */
1957 int last; /* last byte offset */
1958 static const short offsets[] = {
1959 offsetof(xfs_agf_t, agf_magicnum),
1960 offsetof(xfs_agf_t, agf_versionnum),
1961 offsetof(xfs_agf_t, agf_seqno),
1962 offsetof(xfs_agf_t, agf_length),
1963 offsetof(xfs_agf_t, agf_roots[0]),
1964 offsetof(xfs_agf_t, agf_levels[0]),
1965 offsetof(xfs_agf_t, agf_flfirst),
1966 offsetof(xfs_agf_t, agf_fllast),
1967 offsetof(xfs_agf_t, agf_flcount),
1968 offsetof(xfs_agf_t, agf_freeblks),
1969 offsetof(xfs_agf_t, agf_longest),
1970 sizeof(xfs_agf_t)
1971 };
1972
1973 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
1974 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
1975 }
1976
1977 /*
1978 * Interface for inode allocation to force the pag data to be initialized.
1979 */
1980 int /* error */
1981 xfs_alloc_pagf_init(
1982 xfs_mount_t *mp, /* file system mount structure */
1983 xfs_trans_t *tp, /* transaction pointer */
1984 xfs_agnumber_t agno, /* allocation group number */
1985 int flags) /* XFS_ALLOC_FLAGS_... */
1986 {
1987 xfs_buf_t *bp;
1988 int error;
1989
1990 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
1991 return error;
1992 if (bp)
1993 xfs_trans_brelse(tp, bp);
1994 return 0;
1995 }
1996
/*
 * Put the block on the freelist for the allocation group.
 *
 * Advances agf_fllast around the circular AGFL array, stores the block
 * number in that slot, and updates the AGF counters.  agflbp may be
 * NULL, in which case the AGFL buffer is read here.
 */
int					/* error */
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
	xfs_agblock_t		bno)	/* block being freed */
{
	xfs_agf_t		*agf;	/* a.g. freespace structure */
	xfs_agfl_t		*agfl;	/* a.g. free block array */
	xfs_agblock_t		*blockp;/* pointer to array entry */
	int			error;	/* error return value */
#ifdef XFS_ALLOC_TRACE
	static char		fname[] = "xfs_alloc_put_freelist";
#endif
	xfs_mount_t		*mp;	/* mount structure */
	xfs_perag_t		*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1);
	/* Wrap around the circular freelist array. */
	if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
		agf->agf_fllast = 0;
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;
	ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp));
	blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)];
	INT_SET(*blockp, ARCH_CONVERT, bno);
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
	/* Log only the one modified slot of the AGFL array. */
	xfs_trans_log_buf(tp, agflbp,
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
			sizeof(xfs_agblock_t) - 1));
	return 0;
}
2042
2043 /*
2044 * Read in the allocation group header (free/alloc section).
 *
 * On success *bpp points to the AGF buffer.  With
 * XFS_ALLOC_FLAG_TRYLOCK the buffer may be unavailable, in which case
 * *bpp is set to NULL and 0 is returned.  The first successful read
 * of an AGF also initializes that group's in-core per-ag structure.
 * Returns EFSCORRUPTED if the on-disk AGF fails validation.
2045 */
2046 int /* error */
2047 xfs_alloc_read_agf(
2048 xfs_mount_t *mp, /* mount point structure */
2049 xfs_trans_t *tp, /* transaction pointer */
2050 xfs_agnumber_t agno, /* allocation group number */
2051 int flags, /* XFS_ALLOC_FLAG_... */
2052 xfs_buf_t **bpp) /* buffer for the ag freelist header */
2053 {
2054 xfs_agf_t *agf; /* ag freelist header */
2055 int agf_ok; /* set if agf is consistent */
2056 xfs_buf_t *bp; /* return value */
2057 xfs_perag_t *pag; /* per allocation group data */
2058 int error;
2059
2060 ASSERT(agno != NULLAGNUMBER);
2061 error = xfs_trans_read_buf(
2062 mp, tp, mp->m_ddev_targp,
2063 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2064 XFS_FSS_TO_BB(mp, 1),
2065 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U,
2066 &bp);
2067 if (error)
2068 return error;
2069 ASSERT(!bp || !XFS_BUF_GETERROR(bp));
 /*
  * No buffer returned (trylock case, presumably) -- not an error.
  */
2070 if (!bp) {
2071 *bpp = NULL;
2072 return 0;
2073 }
2074 /*
2075 * Validate the magic number, version, and freespace/freelist
 * counters of the agf block before trusting any of it.
2076 */
2077 agf = XFS_BUF_TO_AGF(bp);
2078 agf_ok =
2079 INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC &&
2080 XFS_AGF_GOOD_VERSION(
2081 INT_GET(agf->agf_versionnum, ARCH_CONVERT)) &&
2082 INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
2083 INT_GET(agf->agf_length, ARCH_CONVERT) &&
2084 INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
2085 INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
2086 INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp);
2087 if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
2088 XFS_RANDOM_ALLOC_READ_AGF))) {
2089 XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
2090 XFS_ERRLEVEL_LOW, mp, agf);
2091 xfs_trans_brelse(tp, bp);
2092 return XFS_ERROR(EFSCORRUPTED);
2093 }
 /*
  * First read of this AG: seed the in-core per-ag copies of the
  * AGF counters and btree levels, and set up the busy-extent list.
  */
2094 pag = &mp->m_perag[agno];
2095 if (!pag->pagf_init) {
2096 pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT);
2097 pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT);
2098 pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
2099 pag->pagf_levels[XFS_BTNUM_BNOi] =
2100 INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT);
2101 pag->pagf_levels[XFS_BTNUM_CNTi] =
2102 INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT);
2103 spinlock_init(&pag->pagb_lock, "xfspagb");
2104 pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
2105 sizeof(xfs_perag_busy_t), KM_SLEEP);
2106 pag->pagf_init = 1;
2107 }
 /*
  * On later reads just cross-check that the in-core copies still
  * match what is on disk (debug builds only, and not after a
  * forced shutdown, when they may legitimately disagree).
  */
2108 #ifdef DEBUG
2109 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2110 ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT));
2111 ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT));
2112 ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT));
2113 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2114 INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT));
2115 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2116 INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT));
2117 }
2118 #endif
2119 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
2120 *bpp = bp;
2121 return 0;
2122 }
2123
2124 /*
2125 * Allocate an extent (variable-size).
2126 * Depending on the allocation type, we either look in a single allocation
2127 * group or loop over the allocation groups to find the result.
 *
 * Returns 0 both when an extent is allocated and when nothing could be
 * found: on failure args->fsbno is set to NULLFSBLOCK.  A non-zero
 * return indicates a real error from the lower layers.
2128 */
2129 int /* error */
2130 xfs_alloc_vextent(
2131 xfs_alloc_arg_t *args) /* allocation argument structure */
2132 {
2133 xfs_agblock_t agsize; /* allocation group size */
2134 int error;
2135 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2136 #ifdef XFS_ALLOC_TRACE
2137 static char fname[] = "xfs_alloc_vextent";
2138 #endif
2139 xfs_extlen_t minleft;/* minimum left value, temp copy */
2140 xfs_mount_t *mp; /* mount structure pointer */
2141 xfs_agnumber_t sagno; /* starting allocation group number */
2142 xfs_alloctype_t type; /* input allocation type */
2143 int bump_rotor = 0;
2144 int no_min = 0;
2145 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2146
2147 mp = args->mp;
2148 type = args->otype = args->type;
2149 args->agbno = NULLAGBLOCK;
2150 /*
2151 * Just fix this up, for the case where the last a.g. is shorter
2152 * (or there's only one a.g.) and the caller couldn't easily figure
2153 * that out (xfs_bmap_alloc).
2154 */
2155 agsize = mp->m_sb.sb_agblocks;
2156 if (args->maxlen > agsize)
2157 args->maxlen = agsize;
2158 if (args->alignment == 0)
2159 args->alignment = 1;
2160 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2161 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2162 ASSERT(args->minlen <= args->maxlen);
2163 ASSERT(args->minlen <= agsize);
2164 ASSERT(args->mod < args->prod);
 /*
  * Same checks as the ASSERTs above, but fail softly in production
  * builds: report "no space" rather than allocating from bad input.
  */
2165 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2166 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2167 args->minlen > args->maxlen || args->minlen > agsize ||
2168 args->mod >= args->prod) {
2169 args->fsbno = NULLFSBLOCK;
2170 TRACE_ALLOC("badargs", args);
2171 return 0;
2172 }
2173 minleft = args->minleft;
2174
2175 switch (type) {
2176 case XFS_ALLOCTYPE_THIS_AG:
2177 case XFS_ALLOCTYPE_NEAR_BNO:
2178 case XFS_ALLOCTYPE_THIS_BNO:
2179 /*
2180 * These three force us into a single a.g.
2181 */
2182 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2183 down_read(&mp->m_peraglock);
2184 args->pag = &mp->m_perag[args->agno];
 /*
  * Drop minleft while fixing the freelist, then restore it for
  * the actual allocation below.
  */
2185 args->minleft = 0;
2186 error = xfs_alloc_fix_freelist(args, 0);
2187 args->minleft = minleft;
2188 if (error) {
2189 TRACE_ALLOC("nofix", args);
2190 goto error0;
2191 }
 /*
  * No AGF buffer means the freelist couldn't be fixed up;
  * fall through and report no space.
  */
2192 if (!args->agbp) {
2193 up_read(&mp->m_peraglock);
2194 TRACE_ALLOC("noagbp", args);
2195 break;
2196 }
2197 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2198 if ((error = xfs_alloc_ag_vextent(args)))
2199 goto error0;
2200 up_read(&mp->m_peraglock);
2201 break;
2202 case XFS_ALLOCTYPE_START_BNO:
2203 /*
2204 * Try near allocation first, then anywhere-in-ag after
2205 * the first a.g. fails.
2206 */
2207 if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
2208 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2209 args->fsbno = XFS_AGB_TO_FSB(mp,
2210 ((mp->m_agfrotor / rotorstep) %
2211 mp->m_sb.sb_agcount), 0);
2212 bump_rotor = 1;
2213 }
2214 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2215 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2216 /* FALLTHROUGH */
2217 case XFS_ALLOCTYPE_ANY_AG:
2218 case XFS_ALLOCTYPE_START_AG:
2219 case XFS_ALLOCTYPE_FIRST_AG:
2220 /*
2221 * Rotate through the allocation groups looking for a winner.
2222 */
2223 if (type == XFS_ALLOCTYPE_ANY_AG) {
2224 /*
2225 * Start with the last place we left off.
2226 */
2227 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2228 mp->m_sb.sb_agcount;
2229 args->type = XFS_ALLOCTYPE_THIS_AG;
2230 flags = XFS_ALLOC_FLAG_TRYLOCK;
2231 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2232 /*
2233 * Start with allocation group given by bno.
2234 */
2235 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2236 args->type = XFS_ALLOCTYPE_THIS_AG;
2237 sagno = 0;
2238 flags = 0;
2239 } else {
2240 if (type == XFS_ALLOCTYPE_START_AG)
2241 args->type = XFS_ALLOCTYPE_THIS_AG;
2242 /*
2243 * Start with the given allocation group.
2244 */
2245 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2246 flags = XFS_ALLOC_FLAG_TRYLOCK;
2247 }
2248 /*
2249 * Loop over allocation groups twice; first time with
2250 * trylock set, second time without.
 * A final pass (no_min set) may follow with the minleft
 * reservation dropped before giving up entirely.
2251 */
2252 down_read(&mp->m_peraglock);
2253 for (;;) {
2254 args->pag = &mp->m_perag[args->agno];
 /* Last-chance pass: ignore the minleft reservation. */
2255 if (no_min) args->minleft = 0;
2256 error = xfs_alloc_fix_freelist(args, flags);
2257 args->minleft = minleft;
2258 if (error) {
2259 TRACE_ALLOC("nofix", args);
2260 goto error0;
2261 }
2262 /*
2263 * If we get a buffer back then the allocation will fly.
2264 */
2265 if (args->agbp) {
2266 if ((error = xfs_alloc_ag_vextent(args)))
2267 goto error0;
2268 break;
2269 }
2270 TRACE_ALLOC("loopfailed", args);
2271 /*
2272 * Didn't work, figure out the next iteration.
2273 */
2274 if (args->agno == sagno &&
2275 type == XFS_ALLOCTYPE_START_BNO)
2276 args->type = XFS_ALLOCTYPE_THIS_AG;
2277 if (++(args->agno) == mp->m_sb.sb_agcount)
2278 args->agno = 0;
2279 /*
2280 * Reached the starting a.g., must either be done
2281 * or switch to non-trylock mode.
2282 */
2283 if (args->agno == sagno) {
2284 if (no_min == 1) {
2285 args->agbno = NULLAGBLOCK;
2286 TRACE_ALLOC("allfailed", args);
2287 break;
2288 }
2289 if (flags == 0) {
2290 no_min = 1;
2291 } else {
2292 flags = 0;
2293 if (type == XFS_ALLOCTYPE_START_BNO) {
2294 args->agbno = XFS_FSB_TO_AGBNO(mp,
2295 args->fsbno);
2296 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2297 }
2298 }
2299 }
2300 }
2301 up_read(&mp->m_peraglock);
 /*
  * Record the a.g. we finished in, so the next rotor-based
  * allocation starts from there.
  */
2302 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2303 if (args->agno == sagno)
2304 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2305 (mp->m_sb.sb_agcount * rotorstep);
2306 else
2307 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2308 (mp->m_sb.sb_agcount * rotorstep);
2309 }
2310 break;
2311 default:
2312 ASSERT(0);
2313 /* NOTREACHED */
2314 }
 /*
  * Translate the result back to a filesystem block number, or
  * NULLFSBLOCK if nothing was found.
  */
2315 if (args->agbno == NULLAGBLOCK)
2316 args->fsbno = NULLFSBLOCK;
2317 else {
2318 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2319 #ifdef DEBUG
2320 ASSERT(args->len >= args->minlen);
2321 ASSERT(args->len <= args->maxlen);
2322 ASSERT(args->agbno % args->alignment == 0);
2323 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2324 args->len);
2325 #endif
2326 }
2327 return 0;
2328 error0:
2329 up_read(&mp->m_peraglock);
2330 return error;
2331 }
2332
2333 /*
2334 * Free an extent.
2335 * Just break up the extent address and hand off to xfs_free_ag_extent
2336 * after fixing up the freelist.
2337 */
2338 int /* error */
2339 xfs_free_extent(
2340 xfs_trans_t *tp, /* transaction pointer */
2341 xfs_fsblock_t bno, /* starting block number of extent */
2342 xfs_extlen_t len) /* length of extent */
2343 {
2344 #ifdef DEBUG
2345 xfs_agf_t *agf; /* a.g. freespace header */
2346 #endif
2347 xfs_alloc_arg_t args; /* allocation argument structure */
2348 int error;
2349
2350 ASSERT(len != 0);
2351 args.tp = tp;
2352 args.mp = tp->t_mountp;
2353 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2354 ASSERT(args.agno < args.mp->m_sb.sb_agcount);
2355 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2356 args.alignment = 1;
2357 args.minlen = args.minleft = args.minalignslop = 0;
2358 down_read(&args.mp->m_peraglock);
2359 args.pag = &args.mp->m_perag[args.agno];
2360 if ((error = xfs_alloc_fix_freelist(&args, 0)))
2361 goto error0;
2362 #ifdef DEBUG
2363 ASSERT(args.agbp != NULL);
2364 agf = XFS_BUF_TO_AGF(args.agbp);
2365 ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
2366 #endif
2367 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
2368 len, 0);
2369 error0:
2370 up_read(&args.mp->m_peraglock);
2371 return error;
2372 }