git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blame - libxfs/xfs_alloc.c
xfs: detect agfl count corruption and reset agfl
2bd0ea18 1/*
5e656dbb 2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
da23017d 3 * All Rights Reserved.
5000d01d 4 *
da23017d
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
2bd0ea18 7 * published by the Free Software Foundation.
5000d01d 8 *
da23017d
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
5000d01d 13 *
da23017d
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
2bd0ea18 17 */
9c799827 18#include "libxfs_priv.h"
b626fb59
DC
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_shared.h"
23#include "xfs_trans_resv.h"
24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_mount.h"
f944d3d0 27#include "xfs_defer.h"
b626fb59
DC
28#include "xfs_inode.h"
29#include "xfs_btree.h"
631ac87a 30#include "xfs_rmap.h"
b626fb59
DC
31#include "xfs_alloc_btree.h"
32#include "xfs_alloc.h"
56d3fc2b 33#include "xfs_errortag.h"
b626fb59
DC
34#include "xfs_cksum.h"
35#include "xfs_trace.h"
36#include "xfs_trans.h"
cf8ce220 37#include "xfs_ag_resv.h"
2bd0ea18 38
ff105f75
DC
39struct workqueue_struct *xfs_alloc_wq;
40
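/* Absolute difference between two block numbers. */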
2bd0ea18 41#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
5e656dbb
BN
42
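/*
 * Flags for xfs_alloc_fixup_trees() telling it which of the two btree
 * cursors is already positioned at the free extent record being modified.
 */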
43#define XFSA_FIXUP_BNO_OK 1
44#define XFSA_FIXUP_CNT_OK 2
45
5e656dbb
BN
46STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
47STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
48STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
49STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
a2ceac1f 50 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
2bd0ea18 51
b8165508
DC
52/*
53 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots at
54 * the beginning of the block for a proper header with the location information
55 * and CRC.
56 */
57unsigned int
58xfs_agfl_size(
59 struct xfs_mount *mp)
60{
61 unsigned int size = mp->m_sb.sb_sectsize;
62
63 if (xfs_sb_version_hascrc(&mp->m_sb))
64 size -= sizeof(struct xfs_agfl);
65
66 return size / sizeof(xfs_agblock_t);
67}
68
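/*
 * AG block number at which mkfs places the refcount btree root, immediately
 * after the last of the other statically-placed btree root blocks.
 */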
2a96beb9
DW
69unsigned int
70xfs_refc_block(
71 struct xfs_mount *mp)
72{
73 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
74 return XFS_RMAP_BLOCK(mp) + 1;
75 if (xfs_sb_version_hasfinobt(&mp->m_sb))
76 return XFS_FIBT_BLOCK(mp) + 1;
77 return XFS_IBT_BLOCK(mp) + 1;
78}
79
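/*
 * First AG block past the statically-placed metadata (AG headers and btree
 * root blocks), i.e. the number of blocks mkfs reserves at the start of
 * each AG.
 */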
ef5340cd
DW
80xfs_extlen_t
81xfs_prealloc_blocks(
82 struct xfs_mount *mp)
83{
2a96beb9
DW
84 if (xfs_sb_version_hasreflink(&mp->m_sb))
85 return xfs_refc_block(mp) + 1;
ef5340cd
DW
86 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
87 return XFS_RMAP_BLOCK(mp) + 1;
88 if (xfs_sb_version_hasfinobt(&mp->m_sb))
89 return XFS_FIBT_BLOCK(mp) + 1;
90 return XFS_IBT_BLOCK(mp) + 1;
91}
92
b8a8d6e5
DW
93/*
94 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
95 * AGF buffer (PV 947395), we place constraints on the relationship among
96 * actual allocations for data blocks, freelist blocks, and potential file data
97 * bmap btree blocks. However, these restrictions may result in no actual space
98 * allocated for a delayed extent, for example, a data block in a certain AG is
99 * allocated but there is no additional block available for the bmap btree
100 * block needed when the file's bmap btree splits. The result of this may
101 * lead to an infinite loop when the file gets flushed to disk and all delayed
102 * extents need to be actually allocated. To get around this, we explicitly set
103 * aside a few blocks which will not be reserved in delayed allocation.
104 *
cf8ce220
DW
105 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
106 * potential split of the file's bmap btree.
b8a8d6e5
DW
107 */
108unsigned int
109xfs_alloc_set_aside(
110 struct xfs_mount *mp)
111{
8eeb15ea 112 return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
b8a8d6e5
DW
113}
114
115/*
116 * When deciding how much space to allocate out of an AG, we limit the
117 * allocation maximum size to the size of the AG. However, we cannot use all the
118 * blocks in the AG - some are permanently used by metadata. These
119 * blocks are generally:
120 * - the AG superblock, AGF, AGI and AGFL
121 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
122 * the AGI free inode and rmap btree root blocks.
123 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
124 * - the rmapbt root block
125 *
126 * The AG headers are sector sized, so the amount of space they take up is
127 * dependent on filesystem geometry. The others are all single blocks.
128 */
129unsigned int
130xfs_alloc_ag_max_usable(
131 struct xfs_mount *mp)
132{
133 unsigned int blocks;
134
135 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
136 blocks += XFS_ALLOC_AGFL_RESERVE;
137 blocks += 3; /* AGF, AGI btree root blocks */
138 if (xfs_sb_version_hasfinobt(&mp->m_sb))
139 blocks++; /* finobt root block */
140 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
141 blocks++; /* rmap root block */
868c70e3
DW
142 if (xfs_sb_version_hasreflink(&mp->m_sb))
143 blocks++; /* refcount root block */
b8a8d6e5
DW
144
145 return mp->m_sb.sb_agblocks - blocks;
146}
147
b194c7d8
BN
148/*
149 * Lookup the record equal to [bno, len] in the btree given by cur.
150 */
151STATIC int /* error */
152xfs_alloc_lookup_eq(
153 struct xfs_btree_cur *cur, /* btree cursor */
154 xfs_agblock_t bno, /* starting block of extent */
155 xfs_extlen_t len, /* length of extent */
156 int *stat) /* success/failure */
157{
158 cur->bc_rec.a.ar_startblock = bno;
159 cur->bc_rec.a.ar_blockcount = len;
160 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
161}
162
163/*
164 * Lookup the first record greater than or equal to [bno, len]
165 * in the btree given by cur.
166 */
a2ceac1f 167int /* error */
b194c7d8
BN
168xfs_alloc_lookup_ge(
169 struct xfs_btree_cur *cur, /* btree cursor */
170 xfs_agblock_t bno, /* starting block of extent */
171 xfs_extlen_t len, /* length of extent */
172 int *stat) /* success/failure */
173{
174 cur->bc_rec.a.ar_startblock = bno;
175 cur->bc_rec.a.ar_blockcount = len;
176 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
177}
178
179/*
180 * Lookup the first record less than or equal to [bno, len]
181 * in the btree given by cur.
182 */
1fe41a73 183int /* error */
b194c7d8
BN
184xfs_alloc_lookup_le(
185 struct xfs_btree_cur *cur, /* btree cursor */
186 xfs_agblock_t bno, /* starting block of extent */
187 xfs_extlen_t len, /* length of extent */
188 int *stat) /* success/failure */
189{
190 cur->bc_rec.a.ar_startblock = bno;
191 cur->bc_rec.a.ar_blockcount = len;
192 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
193}
194
195/*
196 * Update the record referred to by cur to the value given
197 * by [bno, len].
198 * This either works (return 0) or gets an EFSCORRUPTED error.
199 */
200STATIC int /* error */
201xfs_alloc_update(
202 struct xfs_btree_cur *cur, /* btree cursor */
203 xfs_agblock_t bno, /* starting block of extent */
204 xfs_extlen_t len) /* length of extent */
205{
206 union xfs_btree_rec rec;
207
208 rec.alloc.ar_startblock = cpu_to_be32(bno);
209 rec.alloc.ar_blockcount = cpu_to_be32(len);
210 return xfs_btree_update(cur, &rec);
211}
212
213/*
214 * Get the data from the pointed-to record.
215 */
a2ceac1f 216int /* error */
b194c7d8
BN
217xfs_alloc_get_rec(
218 struct xfs_btree_cur *cur, /* btree cursor */
219 xfs_agblock_t *bno, /* output: starting block of extent */
220 xfs_extlen_t *len, /* output: length of extent */
221 int *stat) /* output: success/failure */
222{
223 union xfs_btree_rec *rec;
224 int error;
225
226 error = xfs_btree_get_rec(cur, &rec, stat);
227 if (!error && *stat == 1) {
228 *bno = be32_to_cpu(rec->alloc.ar_startblock);
229 *len = be32_to_cpu(rec->alloc.ar_blockcount);
230 }
231 return error;
232}
233
2bd0ea18
NS
234/*
235 * Compute aligned version of the found extent.
236 * Takes alignment and min length into account.
237 */
cd80de04 238STATIC bool
2bd0ea18 239xfs_alloc_compute_aligned(
a2ceac1f 240 xfs_alloc_arg_t *args, /* allocation argument structure */
2bd0ea18
NS
241 xfs_agblock_t foundbno, /* starting block in found extent */
242 xfs_extlen_t foundlen, /* length in found extent */
2bd0ea18 243 xfs_agblock_t *resbno, /* result block number */
cd80de04
CH
244 xfs_extlen_t *reslen, /* result length */
245 unsigned *busy_gen)
2bd0ea18 246{
cd80de04
CH
247 xfs_agblock_t bno = foundbno;
248 xfs_extlen_t len = foundlen;
ff3263dd 249 xfs_extlen_t diff;
cd80de04 250 bool busy;
2bd0ea18 251
a2ceac1f 252 /* Trim busy sections out of found extent */
cd80de04 253 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
a2ceac1f 254
ff3263dd
BF
255 /*
256 * If we have a largish extent that happens to start before min_agbno,
257 * see if we can shift it into range...
258 */
259 if (bno < args->min_agbno && bno + len > args->min_agbno) {
260 diff = args->min_agbno - bno;
261 if (len > diff) {
262 bno += diff;
263 len -= diff;
264 }
265 }
266
a2ceac1f
DC
267 if (args->alignment > 1 && len >= args->minlen) {
268 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
ff3263dd
BF
269
270 diff = aligned_bno - bno;
a2ceac1f
DC
271
272 *resbno = aligned_bno;
273 *reslen = diff >= len ? 0 : len - diff;
2bd0ea18 274 } else {
a2ceac1f
DC
275 *resbno = bno;
276 *reslen = len;
2bd0ea18 277 }
cd80de04
CH
278
279 return busy;
2bd0ea18
NS
280}
281
282/*
283 * Compute best start block and diff for "near" allocations.
284 * freelen >= wantlen already checked by caller.
285 */
286STATIC xfs_extlen_t /* difference value (absolute) */
287xfs_alloc_compute_diff(
288 xfs_agblock_t wantbno, /* target starting block */
289 xfs_extlen_t wantlen, /* target length */
290 xfs_extlen_t alignment, /* target alignment */
1fccd5c8 291 int datatype, /* are we allocating data? */
2bd0ea18
NS
292 xfs_agblock_t freebno, /* freespace's starting block */
293 xfs_extlen_t freelen, /* freespace's length */
294 xfs_agblock_t *newbnop) /* result: best start block from free */
295{
296 xfs_agblock_t freeend; /* end of freespace extent */
297 xfs_agblock_t newbno1; /* return block number */
298 xfs_agblock_t newbno2; /* other new block number */
0e266570
NS
299 xfs_extlen_t newlen1=0; /* length with newbno1 */
300 xfs_extlen_t newlen2=0; /* length with newbno2 */
2bd0ea18 301 xfs_agblock_t wantend; /* end of target extent */
1fccd5c8 302 bool userdata = xfs_alloc_is_userdata(datatype);
2bd0ea18
NS
303
304 ASSERT(freelen >= wantlen);
305 freeend = freebno + freelen;
306 wantend = wantbno + wantlen;
84a62eea
DC
307 /*
308 * We want to allocate from the start of a free extent if it is past
309 * the desired block or if we are allocating user data and the free
310 * extent is before desired block. The second case is there to allow
311 * for contiguous allocation from the remaining free space if the file
312 * grows in the short term.
313 */
314 if (freebno >= wantbno || (userdata && freeend < wantend)) {
2bd0ea18
NS
315 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
316 newbno1 = NULLAGBLOCK;
317 } else if (freeend >= wantend && alignment > 1) {
318 newbno1 = roundup(wantbno, alignment);
319 newbno2 = newbno1 - alignment;
320 if (newbno1 >= freeend)
321 newbno1 = NULLAGBLOCK;
322 else
323 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
324 if (newbno2 < freebno)
325 newbno2 = NULLAGBLOCK;
326 else
327 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
328 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
329 if (newlen1 < newlen2 ||
330 (newlen1 == newlen2 &&
331 XFS_ABSDIFF(newbno1, wantbno) >
332 XFS_ABSDIFF(newbno2, wantbno)))
333 newbno1 = newbno2;
334 } else if (newbno2 != NULLAGBLOCK)
335 newbno1 = newbno2;
336 } else if (freeend >= wantend) {
337 newbno1 = wantbno;
338 } else if (alignment > 1) {
339 newbno1 = roundup(freeend - wantlen, alignment);
340 if (newbno1 > freeend - wantlen &&
341 newbno1 - alignment >= freebno)
342 newbno1 -= alignment;
343 else if (newbno1 >= freeend)
344 newbno1 = NULLAGBLOCK;
345 } else
346 newbno1 = freeend - wantlen;
347 *newbnop = newbno1;
348 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
349}
350
351/*
352 * Fix up the length, based on mod and prod.
353 * len should be k * prod + mod for some k.
354 * If len is too small it is returned unchanged.
355 * If len hits maxlen it is left alone.
356 */
357STATIC void
358xfs_alloc_fix_len(
dfc130f3 359 xfs_alloc_arg_t *args) /* allocation argument structure */
2bd0ea18
NS
360{
361 xfs_extlen_t k;
362 xfs_extlen_t rlen;
363
364 ASSERT(args->mod < args->prod);
365 rlen = args->len;
366 ASSERT(rlen >= args->minlen);
367 ASSERT(rlen <= args->maxlen);
368 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
369 (args->mod == 0 && rlen < args->prod))
370 return;
371 k = rlen % args->prod;
372 if (k == args->mod)
373 return;
ff105f75
DC
374 if (k > args->mod)
375 rlen = rlen - (k - args->mod);
376 else
377 rlen = rlen - args->prod + (args->mod - k);
19ebedcf 378 /* casts to (int) catch length underflows */
ff105f75
DC
379 if ((int)rlen < (int)args->minlen)
380 return;
381 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
382 ASSERT(rlen % args->prod == args->mod);
2c003dc2
CH
383 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
384 rlen + args->minleft);
2bd0ea18
NS
385 args->len = rlen;
386}
387
2bd0ea18
NS
388/*
389 * Update the two btrees, logically removing from freespace the extent
390 * starting at rbno, rlen blocks. The extent is contained within the
391 * actual (current) free extent fbno for flen blocks.
392 * Flags are passed in indicating whether the cursors are set to the
393 * relevant records.
394 */
395STATIC int /* error code */
396xfs_alloc_fixup_trees(
dfc130f3
RC
397 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
398 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
2bd0ea18
NS
399 xfs_agblock_t fbno, /* starting block of free extent */
400 xfs_extlen_t flen, /* length of free extent */
401 xfs_agblock_t rbno, /* starting block of returned extent */
402 xfs_extlen_t rlen, /* length of returned extent */
403 int flags) /* flags, XFSA_FIXUP_... */
404{
405 int error; /* error code */
406 int i; /* operation results */
407 xfs_agblock_t nfbno1; /* first new free startblock */
408 xfs_agblock_t nfbno2; /* second new free startblock */
0e266570
NS
409 xfs_extlen_t nflen1=0; /* first new free length */
410 xfs_extlen_t nflen2=0; /* second new free length */
19ebedcf
DC
411 struct xfs_mount *mp;
412
413 mp = cnt_cur->bc_mp;
2bd0ea18
NS
414
415 /*
416 * Look up the record in the by-size tree if necessary.
417 */
418 if (flags & XFSA_FIXUP_CNT_OK) {
419#ifdef DEBUG
0e266570 420 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
2bd0ea18 421 return error;
19ebedcf 422 XFS_WANT_CORRUPTED_RETURN(mp,
2bd0ea18
NS
423 i == 1 && nfbno1 == fbno && nflen1 == flen);
424#endif
425 } else {
0e266570 426 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
2bd0ea18 427 return error;
19ebedcf 428 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
429 }
430 /*
431 * Look up the record in the by-block tree if necessary.
432 */
433 if (flags & XFSA_FIXUP_BNO_OK) {
434#ifdef DEBUG
0e266570 435 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
2bd0ea18 436 return error;
19ebedcf 437 XFS_WANT_CORRUPTED_RETURN(mp,
2bd0ea18
NS
438 i == 1 && nfbno1 == fbno && nflen1 == flen);
439#endif
440 } else {
0e266570 441 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
2bd0ea18 442 return error;
19ebedcf 443 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18 444 }
b3563c19 445
2bd0ea18 446#ifdef DEBUG
b3563c19
BN
447 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
448 struct xfs_btree_block *bnoblock;
449 struct xfs_btree_block *cntblock;
450
451 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
452 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
2bd0ea18 453
19ebedcf 454 XFS_WANT_CORRUPTED_RETURN(mp,
b3563c19 455 bnoblock->bb_numrecs == cntblock->bb_numrecs);
2bd0ea18
NS
456 }
457#endif
b3563c19 458
2bd0ea18
NS
459 /*
460 * Deal with all four cases: the allocated record is contained
461 * within the freespace record, so we can have new freespace
462 * at either (or both) end, or no freespace remaining.
463 */
464 if (rbno == fbno && rlen == flen)
465 nfbno1 = nfbno2 = NULLAGBLOCK;
466 else if (rbno == fbno) {
467 nfbno1 = rbno + rlen;
468 nflen1 = flen - rlen;
469 nfbno2 = NULLAGBLOCK;
470 } else if (rbno + rlen == fbno + flen) {
471 nfbno1 = fbno;
472 nflen1 = flen - rlen;
473 nfbno2 = NULLAGBLOCK;
474 } else {
475 nfbno1 = fbno;
476 nflen1 = rbno - fbno;
477 nfbno2 = rbno + rlen;
478 nflen2 = (fbno + flen) - nfbno2;
479 }
480 /*
481 * Delete the entry from the by-size btree.
482 */
b194c7d8 483 if ((error = xfs_btree_delete(cnt_cur, &i)))
2bd0ea18 484 return error;
19ebedcf 485 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
486 /*
487 * Add new by-size btree entry(s).
488 */
489 if (nfbno1 != NULLAGBLOCK) {
0e266570 490 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
2bd0ea18 491 return error;
19ebedcf 492 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
b194c7d8 493 if ((error = xfs_btree_insert(cnt_cur, &i)))
2bd0ea18 494 return error;
19ebedcf 495 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
496 }
497 if (nfbno2 != NULLAGBLOCK) {
0e266570 498 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
2bd0ea18 499 return error;
19ebedcf 500 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
b194c7d8 501 if ((error = xfs_btree_insert(cnt_cur, &i)))
2bd0ea18 502 return error;
19ebedcf 503 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
504 }
505 /*
506 * Fix up the by-block btree entry(s).
507 */
508 if (nfbno1 == NULLAGBLOCK) {
509 /*
510 * No remaining freespace, just delete the by-block tree entry.
511 */
b194c7d8 512 if ((error = xfs_btree_delete(bno_cur, &i)))
2bd0ea18 513 return error;
19ebedcf 514 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
515 } else {
516 /*
517 * Update the by-block entry to start later|be shorter.
518 */
0e266570 519 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
2bd0ea18
NS
520 return error;
521 }
522 if (nfbno2 != NULLAGBLOCK) {
523 /*
524 * 2 resulting free entries, need to add one.
525 */
0e266570 526 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
2bd0ea18 527 return error;
19ebedcf 528 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
b194c7d8 529 if ((error = xfs_btree_insert(bno_cur, &i)))
2bd0ea18 530 return error;
19ebedcf 531 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
2bd0ea18
NS
532 }
533 return 0;
534}
535
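/*
 * Structural verifier for the AGFL: check the UUID, magic number and AG
 * sequence number, that every entry is a valid block within this AG, and
 * that the LSN is sane.
 */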
bc01119d 536static xfs_failaddr_t
a2ceac1f
DC
537xfs_agfl_verify(
538 struct xfs_buf *bp)
539{
a2ceac1f
DC
540 struct xfs_mount *mp = bp->b_target->bt_mount;
541 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
a2ceac1f
DC
542 int i;
543
95d9582b
DW
544 /*
545 * There is no verification of non-crc AGFLs because mkfs does not
546 * initialise the AGFL to zero or NULL. Hence the only valid part of the
547 * AGFL is what the AGF says is active. We can't get to the AGF, so we
548 * can't verify just those entries are valid.
549 */
550 if (!xfs_sb_version_hascrc(&mp->m_sb))
551 return NULL;
552
9c4e12fb 553 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
bc01119d 554 return __this_address;
dd5b876e 555 if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
bc01119d 556 return __this_address;
dd5b876e
DC
557 /*
558 * during growfs operations, the perag is not fully initialised,
559 * so we can't use it for any useful checking. growfs ensures we can't
560 * use it by using uncached buffers that don't have the perag attached
561 * so we can detect and avoid this problem.
562 */
563 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
bc01119d 564 return __this_address;
dd5b876e 565
b8165508 566 for (i = 0; i < xfs_agfl_size(mp); i++) {
dd5b876e 567 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
a2ceac1f 568 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
bc01119d 569 return __this_address;
a2ceac1f 570 }
a65d8d29 571
bc01119d
DW
572 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
573 return __this_address;
574 return NULL;
dd5b876e
DC
575}
576
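/*
 * Read verifier: check the CRC first, then the AGFL structure, and mark the
 * buffer with the appropriate error if either check fails.
 */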
577static void
578xfs_agfl_read_verify(
579 struct xfs_buf *bp)
580{
581 struct xfs_mount *mp = bp->b_target->bt_mount;
1e697959 582 xfs_failaddr_t fa;
dd5b876e
DC
583
584 /*
585 * There is no verification of non-crc AGFLs because mkfs does not
586 * initialise the AGFL to zero or NULL. Hence the only valid part of the
587 * AGFL is what the AGF says is active. We can't get to the AGF, so we
588 * can't verify just those entries are valid.
589 */
590 if (!xfs_sb_version_hascrc(&mp->m_sb))
591 return;
592
45922933 593 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
1e697959
DW
594 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
595 else {
596 fa = xfs_agfl_verify(bp);
597 if (fa)
598 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
599 }
a2ceac1f
DC
600}
601
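/*
 * Write verifier: re-run the structural checks, stamp the log item's LSN
 * into the AGFL, and recompute the CRC before the buffer goes to disk.
 */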
602static void
603xfs_agfl_write_verify(
604 struct xfs_buf *bp)
605{
37d086ca
CM
606 struct xfs_mount *mp = bp->b_target->bt_mount;
607 struct xfs_buf_log_item *bip = bp->b_log_item;
1e697959 608 xfs_failaddr_t fa;
a2ceac1f 609
dd5b876e
DC
610 /* no verification of non-crc AGFLs */
611 if (!xfs_sb_version_hascrc(&mp->m_sb))
612 return;
613
1e697959
DW
614 fa = xfs_agfl_verify(bp);
615 if (fa) {
616 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
dd5b876e
DC
617 return;
618 }
619
620 if (bip)
621 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
622
43b5aeed 623 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
a2ceac1f
DC
624}
625
626const struct xfs_buf_ops xfs_agfl_buf_ops = {
a3fac935 627 .name = "xfs_agfl",
a2ceac1f
DC
628 .verify_read = xfs_agfl_read_verify,
629 .verify_write = xfs_agfl_write_verify,
95d9582b 630 .verify_struct = xfs_agfl_verify,
a2ceac1f
DC
631};
632
2bd0ea18
NS
633/*
634 * Read in the allocation group free block array.
635 */
50bb67d6 636int /* error */
2bd0ea18
NS
637xfs_alloc_read_agfl(
638 xfs_mount_t *mp, /* mount point structure */
639 xfs_trans_t *tp, /* transaction pointer */
640 xfs_agnumber_t agno, /* allocation group number */
641 xfs_buf_t **bpp) /* buffer for the ag free block array */
642{
643 xfs_buf_t *bp; /* return value */
2bd0ea18
NS
644 int error;
645
646 ASSERT(agno != NULLAGNUMBER);
9440d84d
NS
647 error = xfs_trans_read_buf(
648 mp, tp, mp->m_ddev_targp,
649 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
a2ceac1f 650 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
9440d84d 651 if (error)
2bd0ea18 652 return error;
a2ceac1f 653 xfs_buf_set_ref(bp, XFS_AGFL_REF);
2bd0ea18
NS
654 *bpp = bp;
655 return 0;
656}
657
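/*
 * Adjust the in-core and on-disk AGF free block counters by "len" (negative
 * for allocations) and log the AGF; return -EFSCORRUPTED if the result would
 * exceed the AG length.
 */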
a2ceac1f
DC
658STATIC int
659xfs_alloc_update_counters(
660 struct xfs_trans *tp,
661 struct xfs_perag *pag,
662 struct xfs_buf *agbp,
663 long len)
664{
665 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
666
667 pag->pagf_freeblks += len;
668 be32_add_cpu(&agf->agf_freeblks, len);
669
670 xfs_trans_agblocks_delta(tp, len);
671 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
672 be32_to_cpu(agf->agf_length)))
12b53197 673 return -EFSCORRUPTED;
a2ceac1f
DC
674
675 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
676 return 0;
677}
678
2bd0ea18
NS
679/*
680 * Allocation group level functions.
681 */
682
683/*
684 * Allocate a variable extent in the allocation group agno.
685 * Type and bno are used to determine where in the allocation group the
686 * extent will start.
687 * Extent's length (returned in *len) will be between minlen and maxlen,
688 * and of the form k * prod + mod unless there's nothing that large.
689 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
690 */
691STATIC int /* error */
692xfs_alloc_ag_vextent(
dfc130f3 693 xfs_alloc_arg_t *args) /* argument structure for allocation */
2bd0ea18 694{
0e266570 695 int error=0;
2bd0ea18
NS
696
697 ASSERT(args->minlen > 0);
698 ASSERT(args->maxlen > 0);
699 ASSERT(args->minlen <= args->maxlen);
700 ASSERT(args->mod < args->prod);
701 ASSERT(args->alignment > 0);
cf8ce220 702
2bd0ea18
NS
703 /*
704 * Branch to correct routine based on the type.
705 */
706 args->wasfromfl = 0;
707 switch (args->type) {
708 case XFS_ALLOCTYPE_THIS_AG:
709 error = xfs_alloc_ag_vextent_size(args);
710 break;
711 case XFS_ALLOCTYPE_NEAR_BNO:
712 error = xfs_alloc_ag_vextent_near(args);
713 break;
714 case XFS_ALLOCTYPE_THIS_BNO:
715 error = xfs_alloc_ag_vextent_exact(args);
716 break;
717 default:
718 ASSERT(0);
719 /* NOTREACHED */
720 }
a2ceac1f
DC
721
722 if (error || args->agbno == NULLAGBLOCK)
2bd0ea18 723 return error;
2bd0ea18 724
a2ceac1f
DC
725 ASSERT(args->len >= args->minlen);
726 ASSERT(args->len <= args->maxlen);
9760cac2 727 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
a2ceac1f
DC
728 ASSERT(args->agbno % args->alignment == 0);
729
631ac87a 730 /* if not file data, insert new block into the reverse map btree */
3ee858aa 731 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
631ac87a
DW
732 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
733 args->agbno, args->len, &args->oinfo);
734 if (error)
735 return error;
736 }
737
a2ceac1f
DC
738 if (!args->wasfromfl) {
739 error = xfs_alloc_update_counters(args->tp, args->pag,
740 args->agbp,
741 -((long)(args->len)));
742 if (error)
743 return error;
744
745 ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
746 args->agbno, args->len));
2bd0ea18 747 }
a2ceac1f 748
cf8ce220 749 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
a2ceac1f 750
79896434
BD
751 XFS_STATS_INC(args->mp, xs_allocx);
752 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
a2ceac1f 753 return error;
2bd0ea18
NS
754}
755
756/*
757 * Allocate a variable extent at exactly agno/bno.
758 * Extent's length (returned in *len) will be between minlen and maxlen,
759 * and of the form k * prod + mod unless there's nothing that large.
760 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
761 */
762STATIC int /* error */
763xfs_alloc_ag_vextent_exact(
dfc130f3 764 xfs_alloc_arg_t *args) /* allocation argument structure */
2bd0ea18 765{
dfc130f3
RC
766 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
767 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
2bd0ea18
NS
768 int error;
769 xfs_agblock_t fbno; /* start block of found extent */
2bd0ea18 770 xfs_extlen_t flen; /* length of found extent */
cd80de04
CH
771 xfs_agblock_t tbno; /* start block of busy extent */
772 xfs_extlen_t tlen; /* length of busy extent */
773 xfs_agblock_t tend; /* end block of busy extent */
2bd0ea18 774 int i; /* success/failure of operation */
cd80de04 775 unsigned busy_gen;
2bd0ea18
NS
776
777 ASSERT(args->alignment == 1);
a2ceac1f 778
2bd0ea18
NS
779 /*
780 * Allocate/initialize a cursor for the by-number freespace btree.
781 */
b194c7d8 782 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
56b2de80
DC
783 args->agno, XFS_BTNUM_BNO);
784
2bd0ea18
NS
785 /*
786 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
787 * Look for the closest free block <= bno, it must contain bno
788 * if any free block does.
789 */
56b2de80
DC
790 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
791 if (error)
2bd0ea18 792 goto error0;
56b2de80
DC
793 if (!i)
794 goto not_found;
795
2bd0ea18
NS
796 /*
797 * Grab the freespace record.
798 */
56b2de80
DC
799 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
800 if (error)
2bd0ea18 801 goto error0;
19ebedcf 802 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
2bd0ea18 803 ASSERT(fbno <= args->agbno);
56b2de80 804
5000d01d 805 /*
a2ceac1f
DC
806 * Check for overlapping busy extents.
807 */
cd80de04
CH
808 tbno = fbno;
809 tlen = flen;
810 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
a2ceac1f
DC
811
812 /*
813 * Give up if the start of the extent is busy, or the freespace isn't
814 * long enough for the minimum request.
2bd0ea18 815 */
a2ceac1f
DC
816 if (tbno > args->agbno)
817 goto not_found;
818 if (tlen < args->minlen)
819 goto not_found;
820 tend = tbno + tlen;
821 if (tend < args->agbno + args->minlen)
56b2de80
DC
822 goto not_found;
823
2bd0ea18
NS
824 /*
825 * End of extent will be the smaller of the freespace end and the
826 * maximal requested end.
56b2de80 827 *
2bd0ea18
NS
828 * Fix the length according to mod and prod if given.
829 */
a2ceac1f
DC
830 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
831 - args->agbno;
2bd0ea18 832 xfs_alloc_fix_len(args);
a2ceac1f 833 ASSERT(args->agbno + args->len <= tend);
56b2de80 834
2bd0ea18 835 /*
a2ceac1f 836 * We are allocating agbno for args->len
2bd0ea18
NS
837 * Allocate/initialize a cursor for the by-size btree.
838 */
b194c7d8
BN
839 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
840 args->agno, XFS_BTNUM_CNT);
2bd0ea18 841 ASSERT(args->agbno + args->len <=
6e3140c7 842 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
56b2de80
DC
843 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
844 args->len, XFSA_FIXUP_BNO_OK);
845 if (error) {
2bd0ea18
NS
846 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
847 goto error0;
848 }
a2ceac1f 849
2bd0ea18
NS
850 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
851 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
a2ceac1f 852
2bd0ea18 853 args->wasfromfl = 0;
56b2de80
DC
854 trace_xfs_alloc_exact_done(args);
855 return 0;
856
857not_found:
858 /* Didn't find it, return null. */
859 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
860 args->agbno = NULLAGBLOCK;
861 trace_xfs_alloc_exact_notfound(args);
2bd0ea18
NS
862 return 0;
863
864error0:
865 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
56b2de80
DC
866 trace_xfs_alloc_exact_error(args);
867 return error;
868}
869
870/*
871 * Search the btree in a given direction via the search cursor and compare
872 * the records found against the good extent we've already found.
873 */
874STATIC int
875xfs_alloc_find_best_extent(
876 struct xfs_alloc_arg *args, /* allocation argument structure */
877 struct xfs_btree_cur **gcur, /* good cursor */
878 struct xfs_btree_cur **scur, /* searching cursor */
879 xfs_agblock_t gdiff, /* difference for search comparison */
880 xfs_agblock_t *sbno, /* extent found by search */
a2ceac1f
DC
881 xfs_extlen_t *slen, /* extent length */
882 xfs_agblock_t *sbnoa, /* aligned extent found by search */
883 xfs_extlen_t *slena, /* aligned extent length */
56b2de80
DC
884 int dir) /* 0 = search right, 1 = search left */
885{
56b2de80
DC
886 xfs_agblock_t new;
887 xfs_agblock_t sdiff;
888 int error;
889 int i;
cd80de04 890 unsigned busy_gen;
56b2de80
DC
891
892 /* The good extent is perfect, no need to search. */
893 if (!gdiff)
894 goto out_use_good;
895
896 /*
897 * Look until we find a better one, run out of space or run off the end.
898 */
899 do {
900 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
901 if (error)
902 goto error0;
19ebedcf 903 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
cd80de04
CH
904 xfs_alloc_compute_aligned(args, *sbno, *slen,
905 sbnoa, slena, &busy_gen);
56b2de80
DC
906
907 /*
908 * The good extent is closer than this one.
909 */
910 if (!dir) {
ff3263dd
BF
911 if (*sbnoa > args->max_agbno)
912 goto out_use_good;
a2ceac1f 913 if (*sbnoa >= args->agbno + gdiff)
56b2de80
DC
914 goto out_use_good;
915 } else {
ff3263dd
BF
916 if (*sbnoa < args->min_agbno)
917 goto out_use_good;
a2ceac1f 918 if (*sbnoa <= args->agbno - gdiff)
56b2de80
DC
919 goto out_use_good;
920 }
921
922 /*
923 * Same distance, compare length and pick the best.
924 */
925 if (*slena >= args->minlen) {
926 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
927 xfs_alloc_fix_len(args);
928
929 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
84a62eea 930 args->alignment,
1fccd5c8 931 args->datatype, *sbnoa,
a2ceac1f 932 *slena, &new);
56b2de80
DC
933
934 /*
935 * Choose closer size and invalidate other cursor.
936 */
937 if (sdiff < gdiff)
938 goto out_use_search;
939 goto out_use_good;
940 }
941
942 if (!dir)
943 error = xfs_btree_increment(*scur, 0, &i);
944 else
945 error = xfs_btree_decrement(*scur, 0, &i);
946 if (error)
947 goto error0;
948 } while (i);
949
950out_use_good:
951 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
952 *scur = NULL;
953 return 0;
954
955out_use_search:
956 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
957 *gcur = NULL;
958 return 0;
959
960error0:
961 /* caller invalidates cursors */
2bd0ea18
NS
962 return error;
963}
964
965/*
966 * Allocate a variable extent near bno in the allocation group agno.
967 * Extent's length (returned in len) will be between minlen and maxlen,
968 * and of the form k * prod + mod unless there's nothing that large.
969 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
970 */
971STATIC int /* error */
972xfs_alloc_ag_vextent_near(
dfc130f3 973 xfs_alloc_arg_t *args) /* allocation argument structure */
2bd0ea18 974{
dfc130f3
RC
975 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
976 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
977 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
2bd0ea18
NS
978 xfs_agblock_t gtbno; /* start bno of right side entry */
979 xfs_agblock_t gtbnoa; /* aligned ... */
980 xfs_extlen_t gtdiff; /* difference to right side entry */
981 xfs_extlen_t gtlen; /* length of right side entry */
a2ceac1f 982 xfs_extlen_t gtlena; /* aligned ... */
2bd0ea18
NS
983 xfs_agblock_t gtnew; /* useful start bno of right side */
984 int error; /* error code */
985 int i; /* result code, temporary */
986 int j; /* result code, temporary */
987 xfs_agblock_t ltbno; /* start bno of left side entry */
988 xfs_agblock_t ltbnoa; /* aligned ... */
989 xfs_extlen_t ltdiff; /* difference to left side entry */
2bd0ea18 990 xfs_extlen_t ltlen; /* length of left side entry */
a2ceac1f 991 xfs_extlen_t ltlena; /* aligned ... */
2bd0ea18
NS
992 xfs_agblock_t ltnew; /* useful start bno of left side */
993 xfs_extlen_t rlen; /* length of returned extent */
cd80de04
CH
994 bool busy;
995 unsigned busy_gen;
6beba453 996#ifdef DEBUG
2bd0ea18
NS
997 /*
998 * Randomly don't execute the first algorithm.
999 */
2bd0ea18 1000 int dofirst; /* set to do first algorithm */
2bd0ea18 1001
49f693fa 1002 dofirst = prandom_u32() & 1;
2bd0ea18 1003#endif
a2ceac1f 1004
ff3263dd
BF
1005 /* handle uninitialized agbno range so caller doesn't have to */
1006 if (!args->min_agbno && !args->max_agbno)
1007 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1008 ASSERT(args->min_agbno <= args->max_agbno);
1009
1010 /* clamp agbno to the range if it's outside */
1011 if (args->agbno < args->min_agbno)
1012 args->agbno = args->min_agbno;
1013 if (args->agbno > args->max_agbno)
1014 args->agbno = args->max_agbno;
1015
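	/*
	 * If every candidate extent is trimmed away by busy extents we force
	 * the log and retry the whole search from here.
	 */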
a2ceac1f
DC
1016restart:
1017 bno_cur_lt = NULL;
1018 bno_cur_gt = NULL;
1019 ltlen = 0;
1020 gtlena = 0;
1021 ltlena = 0;
cd80de04 1022 busy = false;
a2ceac1f 1023
2bd0ea18
NS
1024 /*
1025 * Get a cursor for the by-size btree.
1026 */
b194c7d8
BN
1027 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1028 args->agno, XFS_BTNUM_CNT);
a2ceac1f 1029
2bd0ea18
NS
1030 /*
1031 * See if there are any free extents as big as maxlen.
1032 */
0e266570 1033 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
2bd0ea18
NS
1034 goto error0;
1035 /*
1036 * If none, then pick up the last entry in the tree unless the
1037 * tree is empty.
5000d01d 1038 */
2bd0ea18 1039 if (!i) {
0e266570
NS
1040 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
1041 &ltlen, &i)))
2bd0ea18
NS
1042 goto error0;
1043 if (i == 0 || ltlen == 0) {
1044 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
a2ceac1f 1045 trace_xfs_alloc_near_noentry(args);
2bd0ea18
NS
1046 return 0;
1047 }
1048 ASSERT(i == 1);
1049 }
1050 args->wasfromfl = 0;
a2ceac1f 1051
5000d01d 1052 /*
2bd0ea18
NS
1053 * First algorithm.
1054 * If the requested extent is large wrt the freespaces available
1055 * in this a.g., then the cursor will be pointing to a btree entry
1056 * near the right edge of the tree. If it's in the last btree leaf
1057 * block, then we just examine all the entries in that block
1058 * that are big enough, and pick the best one.
1059 * This is written as a while loop so we can break out of it,
1060 * but we never loop back to the top.
1061 */
1062 while (xfs_btree_islastblock(cnt_cur, 0)) {
1063 xfs_extlen_t bdiff;
0e266570
NS
1064 int besti=0;
1065 xfs_extlen_t blen=0;
1066 xfs_agblock_t bnew=0;
2bd0ea18 1067
6beba453
DC
1068#ifdef DEBUG
1069 if (dofirst)
2bd0ea18
NS
1070 break;
1071#endif
1072 /*
1073 * Start from the entry that lookup found, sequence through
1074 * all larger free blocks. If we're actually pointing at a
1075 * record smaller than maxlen, go to the start of this block,
1076 * and skip all those smaller than minlen.
1077 */
1078 if (ltlen || args->alignment > 1) {
1079 cnt_cur->bc_ptrs[0] = 1;
1080 do {
0e266570
NS
1081 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
1082 &ltlen, &i)))
2bd0ea18 1083 goto error0;
19ebedcf 1084 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
2bd0ea18
NS
1085 if (ltlen >= args->minlen)
1086 break;
b194c7d8 1087 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
2bd0ea18
NS
1088 goto error0;
1089 } while (i);
1090 ASSERT(ltlen >= args->minlen);
1091 if (!i)
1092 break;
1093 }
1094 i = cnt_cur->bc_ptrs[0];
1095 for (j = 1, blen = 0, bdiff = 0;
1096 !error && j && (blen < args->maxlen || bdiff > 0);
b194c7d8 1097 error = xfs_btree_increment(cnt_cur, 0, &j)) {
2bd0ea18
NS
1098 /*
1099 * For each entry, decide if it's better than
1100 * the previous best entry.
1101 */
0e266570 1102 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
2bd0ea18 1103 goto error0;
19ebedcf 1104 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
cd80de04
CH
1105 busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
1106 &ltbnoa, &ltlena, &busy_gen);
5e656dbb 1107 if (ltlena < args->minlen)
2bd0ea18 1108 continue;
ff3263dd
BF
1109 if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1110 continue;
2bd0ea18
NS
1111 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1112 xfs_alloc_fix_len(args);
1113 ASSERT(args->len >= args->minlen);
1114 if (args->len < blen)
1115 continue;
1116 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1fccd5c8 1117 args->alignment, args->datatype, ltbnoa,
84a62eea 1118 ltlena, &ltnew);
2bd0ea18
NS
1119 if (ltnew != NULLAGBLOCK &&
1120 (args->len > blen || ltdiff < bdiff)) {
1121 bdiff = ltdiff;
1122 bnew = ltnew;
1123 blen = args->len;
1124 besti = cnt_cur->bc_ptrs[0];
1125 }
1126 }
1127 /*
1128 * It didn't work. We COULD be in a case where
1129 * there's a good record somewhere, so try again.
1130 */
1131 if (blen == 0)
1132 break;
1133 /*
1134 * Point at the best entry, and retrieve it again.
1135 */
1136 cnt_cur->bc_ptrs[0] = besti;
0e266570 1137 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
2bd0ea18 1138 goto error0;
19ebedcf 1139 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
56b2de80 1140 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
2bd0ea18 1141 args->len = blen;
2c003dc2 1142
2bd0ea18
NS
1143 /*
1144 * We are allocating starting at bnew for blen blocks.
1145 */
1146 args->agbno = bnew;
1147 ASSERT(bnew >= ltbno);
56b2de80 1148 ASSERT(bnew + blen <= ltbno + ltlen);
2bd0ea18
NS
1149 /*
1150 * Set up a cursor for the by-bno tree.
1151 */
b194c7d8
BN
1152 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1153 args->agbp, args->agno, XFS_BTNUM_BNO);
2bd0ea18
NS
1154 /*
1155 * Fix up the btree entries.
1156 */
0e266570
NS
1157 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1158 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
2bd0ea18
NS
1159 goto error0;
1160 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1161 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
56b2de80
DC
1162
1163 trace_xfs_alloc_near_first(args);
2bd0ea18
NS
1164 return 0;
1165 }
1166 /*
1167 * Second algorithm.
1168 * Search in the by-bno tree to the left and to the right
1169 * simultaneously, until in each case we find a space big enough,
1170 * or run into the edge of the tree. When we run into the edge,
1171 * we deallocate that cursor.
1172 * If both searches succeed, we compare the two spaces and pick
1173 * the better one.
1174 * With alignment, it's possible for both to fail; the upper
1175 * level algorithm that picks allocation groups for allocations
1176 * is not supposed to do this.
1177 */
1178 /*
1179 * Allocate and initialize the cursor for the leftward search.
1180 */
b194c7d8
BN
1181 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1182 args->agno, XFS_BTNUM_BNO);
2bd0ea18
NS
1183 /*
1184 * Lookup <= bno to find the leftward search's starting point.
1185 */
0e266570 1186 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
2bd0ea18
NS
1187 goto error0;
1188 if (!i) {
1189 /*
1190 * Didn't find anything; use this cursor for the rightward
1191 * search.
1192 */
1193 bno_cur_gt = bno_cur_lt;
062998e3 1194 bno_cur_lt = NULL;
2bd0ea18
NS
1195 }
1196 /*
1197 * Found something. Duplicate the cursor for the rightward search.
1198 */
0e266570 1199 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
2bd0ea18
NS
1200 goto error0;
1201 /*
1202 * Increment the cursor, so we will point at the entry just right
1203 * of the leftward entry if any, or to the leftmost entry.
1204 */
b194c7d8 1205 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
2bd0ea18
NS
1206 goto error0;
1207 if (!i) {
1208 /*
1209 * It failed, there are no rightward entries.
1210 */
1211 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1212 bno_cur_gt = NULL;
1213 }
1214 /*
1215 * Loop going left with the leftward cursor, right with the
1216 * rightward cursor, until either both directions give up or
1217 * we find an entry at least as big as minlen.
1218 */
1219 do {
1220 if (bno_cur_lt) {
0e266570 1221 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
2bd0ea18 1222 goto error0;
19ebedcf 1223 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
cd80de04
CH
1224 busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
1225 &ltbnoa, &ltlena, &busy_gen);
ff3263dd 1226 if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
2bd0ea18 1227 break;
b194c7d8 1228 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
2bd0ea18 1229 goto error0;
ff3263dd 1230 if (!i || ltbnoa < args->min_agbno) {
2bd0ea18
NS
1231 xfs_btree_del_cursor(bno_cur_lt,
1232 XFS_BTREE_NOERROR);
1233 bno_cur_lt = NULL;
1234 }
1235 }
1236 if (bno_cur_gt) {
0e266570 1237 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
2bd0ea18 1238 goto error0;
19ebedcf 1239 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
cd80de04
CH
1240 busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
1241 &gtbnoa, &gtlena, &busy_gen);
ff3263dd 1242 if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
2bd0ea18 1243 break;
b194c7d8 1244 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
2bd0ea18 1245 goto error0;
ff3263dd 1246 if (!i || gtbnoa > args->max_agbno) {
2bd0ea18
NS
1247 xfs_btree_del_cursor(bno_cur_gt,
1248 XFS_BTREE_NOERROR);
1249 bno_cur_gt = NULL;
1250 }
1251 }
1252 } while (bno_cur_lt || bno_cur_gt);
56b2de80 1253
2bd0ea18
NS
1254 /*
1255 * Got both cursors still active, need to find better entry.
1256 */
1257 if (bno_cur_lt && bno_cur_gt) {
2bd0ea18
NS
1258 if (ltlena >= args->minlen) {
1259 /*
56b2de80 1260 * Left side is good, look for a right side entry.
2bd0ea18
NS
1261 */
1262 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1263 xfs_alloc_fix_len(args);
56b2de80 1264 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1fccd5c8 1265 args->alignment, args->datatype, ltbnoa,
84a62eea 1266 ltlena, &ltnew);
56b2de80
DC
1267
1268 error = xfs_alloc_find_best_extent(args,
1269 &bno_cur_lt, &bno_cur_gt,
a2ceac1f
DC
1270 ltdiff, &gtbno, &gtlen,
1271 &gtbnoa, &gtlena,
56b2de80
DC
1272 0 /* search right */);
1273 } else {
1274 ASSERT(gtlena >= args->minlen);
1275
2bd0ea18 1276 /*
56b2de80 1277 * Right side is good, look for a left side entry.
2bd0ea18
NS
1278 */
1279 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1280 xfs_alloc_fix_len(args);
56b2de80 1281 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1fccd5c8 1282 args->alignment, args->datatype, gtbnoa,
84a62eea 1283 gtlena, &gtnew);
56b2de80
DC
1284
1285 error = xfs_alloc_find_best_extent(args,
1286 &bno_cur_gt, &bno_cur_lt,
a2ceac1f
DC
1287 gtdiff, &ltbno, &ltlen,
1288 &ltbnoa, &ltlena,
56b2de80 1289 1 /* search left */);
2bd0ea18 1290 }
56b2de80
DC
1291
1292 if (error)
1293 goto error0;
2bd0ea18 1294 }
56b2de80 1295
2bd0ea18
NS
1296 /*
1297 * If we couldn't get anything, give up.
1298 */
1299 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
a2ceac1f
DC
1300 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1301
cd80de04 1302 if (busy) {
a2ceac1f 1303 trace_xfs_alloc_near_busy(args);
cd80de04 1304 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
a2ceac1f
DC
1305 goto restart;
1306 }
56b2de80 1307 trace_xfs_alloc_size_neither(args);
2bd0ea18
NS
1308 args->agbno = NULLAGBLOCK;
1309 return 0;
1310 }
56b2de80 1311
2bd0ea18
NS
1312 /*
1313 * At this point we have selected a freespace entry, either to the
1314 * left or to the right. If it's on the right, copy all the
1315 * useful variables to the "left" set so we only have one
1316 * copy of this code.
1317 */
1318 if (bno_cur_gt) {
1319 bno_cur_lt = bno_cur_gt;
1320 bno_cur_gt = NULL;
1321 ltbno = gtbno;
1322 ltbnoa = gtbnoa;
1323 ltlen = gtlen;
1324 ltlena = gtlena;
1325 j = 1;
1326 } else
1327 j = 0;
56b2de80 1328
2bd0ea18
NS
1329 /*
1330 * Fix up the length and compute the useful address.
1331 */
2bd0ea18
NS
1332 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1333 xfs_alloc_fix_len(args);
2bd0ea18 1334 rlen = args->len;
a2ceac1f 1335 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1fccd5c8 1336 args->datatype, ltbnoa, ltlena, &ltnew);
2bd0ea18 1337 ASSERT(ltnew >= ltbno);
a2ceac1f 1338 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
6e3140c7 1339 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
ff3263dd 1340 ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
2bd0ea18 1341 args->agbno = ltnew;
a2ceac1f 1342
0e266570
NS
1343 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1344 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
2bd0ea18 1345 goto error0;
56b2de80
DC
1346
1347 if (j)
1348 trace_xfs_alloc_near_greater(args);
1349 else
1350 trace_xfs_alloc_near_lesser(args);
1351
2bd0ea18
NS
1352 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1353 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1354 return 0;
1355
1356 error0:
56b2de80 1357 trace_xfs_alloc_near_error(args);
2bd0ea18
NS
1358 if (cnt_cur != NULL)
1359 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1360 if (bno_cur_lt != NULL)
1361 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1362 if (bno_cur_gt != NULL)
1363 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1364 return error;
1365}
1366
1367/*
1368 * Allocate a variable extent anywhere in the allocation group agno.
1369 * Extent's length (returned in len) will be between minlen and maxlen,
1370 * and of the form k * prod + mod unless there's nothing that large.
1371 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1372 */
1373STATIC int /* error */
1374xfs_alloc_ag_vextent_size(
dfc130f3 1375 xfs_alloc_arg_t *args) /* allocation argument structure */
2bd0ea18 1376{
dfc130f3
RC
1377 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1378 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
2bd0ea18
NS
1379 int error; /* error result */
1380 xfs_agblock_t fbno; /* start of found freespace */
1381 xfs_extlen_t flen; /* length of found freespace */
2bd0ea18
NS
1382 int i; /* temp status variable */
1383 xfs_agblock_t rbno; /* returned block number */
1384 xfs_extlen_t rlen; /* length of returned extent */
cd80de04
CH
1385 bool busy;
1386 unsigned busy_gen;
2bd0ea18 1387
a2ceac1f 1388restart:
2bd0ea18
NS
1389 /*
1390 * Allocate and initialize a cursor for the by-size btree.
1391 */
b194c7d8
BN
1392 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1393 args->agno, XFS_BTNUM_CNT);
2bd0ea18 1394 bno_cur = NULL;
cd80de04 1395 busy = false;
a2ceac1f 1396
2bd0ea18
NS
1397 /*
1398 * Look for an entry >= maxlen+alignment-1 blocks.
1399 */
0e266570
NS
1400 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1401 args->maxlen + args->alignment - 1, &i)))
2bd0ea18 1402 goto error0;
a2ceac1f 1403
2bd0ea18 1404 /*
cd80de04
CH
1405 * If none then we have to settle for a smaller extent. In the case that
1406 * there are no large extents, this will return the last entry in the
1407 * tree unless the tree is empty. In the case that there are only busy
1408 * large extents, this will return the largest small extent unless there
a2ceac1f 1409 * are no smaller extents available.
5000d01d 1410 */
cd80de04 1411 if (!i) {
a2ceac1f
DC
1412 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1413 &fbno, &flen, &i);
1414 if (error)
2bd0ea18
NS
1415 goto error0;
1416 if (i == 0 || flen == 0) {
1417 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
56b2de80 1418 trace_xfs_alloc_size_noentry(args);
2bd0ea18
NS
1419 return 0;
1420 }
1421 ASSERT(i == 1);
cd80de04
CH
1422 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1423 &rlen, &busy_gen);
a2ceac1f
DC
1424 } else {
1425 /*
1426 * Search for a non-busy extent that is large enough.
a2ceac1f
DC
1427 */
1428 for (;;) {
1429 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1430 if (error)
1431 goto error0;
19ebedcf 1432 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
a2ceac1f 1433
cd80de04
CH
1434 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1435 &rbno, &rlen, &busy_gen);
a2ceac1f
DC
1436
1437 if (rlen >= args->maxlen)
1438 break;
1439
1440 error = xfs_btree_increment(cnt_cur, 0, &i);
1441 if (error)
1442 goto error0;
1443 if (i == 0) {
1444 /*
1445 * Our only valid extents must have been busy.
1446 * Make it unbusy by forcing the log out and
cd80de04 1447 * retrying.
a2ceac1f
DC
1448 */
1449 xfs_btree_del_cursor(cnt_cur,
1450 XFS_BTREE_NOERROR);
1451 trace_xfs_alloc_size_busy(args);
cd80de04
CH
1452 xfs_extent_busy_flush(args->mp,
1453 args->pag, busy_gen);
a2ceac1f
DC
1454 goto restart;
1455 }
1456 }
2bd0ea18 1457 }
a2ceac1f 1458
2bd0ea18
NS
1459 /*
1460 * In the first case above, we got the last entry in the
1461 * by-size btree. Now we check to see if the space hits maxlen
1462 * once aligned; if not, we search left for something better.
1463 * This can't happen in the second case above.
1464 */
2bd0ea18 1465 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
19ebedcf 1466 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
2bd0ea18
NS
1467 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1468 if (rlen < args->maxlen) {
1469 xfs_agblock_t bestfbno;
1470 xfs_extlen_t bestflen;
1471 xfs_agblock_t bestrbno;
1472 xfs_extlen_t bestrlen;
1473
1474 bestrlen = rlen;
1475 bestrbno = rbno;
1476 bestflen = flen;
1477 bestfbno = fbno;
1478 for (;;) {
b194c7d8 1479 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
2bd0ea18
NS
1480 goto error0;
1481 if (i == 0)
1482 break;
0e266570
NS
1483 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1484 &i)))
2bd0ea18 1485 goto error0;
19ebedcf 1486 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
2bd0ea18
NS
1487 if (flen < bestrlen)
1488 break;
cd80de04
CH
1489 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1490 &rbno, &rlen, &busy_gen);
2bd0ea18 1491 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
19ebedcf 1492 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
2bd0ea18
NS
1493 (rlen <= flen && rbno + rlen <= fbno + flen),
1494 error0);
1495 if (rlen > bestrlen) {
1496 bestrlen = rlen;
1497 bestrbno = rbno;
1498 bestflen = flen;
1499 bestfbno = fbno;
1500 if (rlen == args->maxlen)
1501 break;
1502 }
5000d01d 1503 }
0e266570
NS
1504 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1505 &i)))
2bd0ea18 1506 goto error0;
19ebedcf 1507 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
2bd0ea18
NS
1508 rlen = bestrlen;
1509 rbno = bestrbno;
1510 flen = bestflen;
1511 fbno = bestfbno;
1512 }
1513 args->wasfromfl = 0;
1514 /*
1515 * Fix up the length.
1516 */
1517 args->len = rlen;
a2ceac1f 1518 if (rlen < args->minlen) {
cd80de04 1519 if (busy) {
a2ceac1f
DC
1520 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1521 trace_xfs_alloc_size_busy(args);
cd80de04 1522 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
a2ceac1f
DC
1523 goto restart;
1524 }
1525 goto out_nominleft;
2bd0ea18 1526 }
a2ceac1f
DC
1527 xfs_alloc_fix_len(args);
1528
2bd0ea18 1529 rlen = args->len;
19ebedcf 1530 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
2bd0ea18
NS
1531 /*
1532 * Allocate and initialize a cursor for the by-block tree.
1533 */
b194c7d8
BN
1534 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1535 args->agno, XFS_BTNUM_BNO);
0e266570
NS
1536 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1537 rbno, rlen, XFSA_FIXUP_CNT_OK)))
2bd0ea18
NS
1538 goto error0;
1539 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1540 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1541 cnt_cur = bno_cur = NULL;
1542 args->len = rlen;
1543 args->agbno = rbno;
19ebedcf 1544 XFS_WANT_CORRUPTED_GOTO(args->mp,
2bd0ea18 1545 args->agbno + args->len <=
6e3140c7 1546 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
2bd0ea18 1547 error0);
56b2de80 1548 trace_xfs_alloc_size_done(args);
2bd0ea18
NS
1549 return 0;
1550
1551error0:
56b2de80 1552 trace_xfs_alloc_size_error(args);
2bd0ea18
NS
1553 if (cnt_cur)
1554 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1555 if (bno_cur)
1556 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1557 return error;
a2ceac1f
DC
1558
1559out_nominleft:
1560 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1561 trace_xfs_alloc_size_nominleft(args);
1562 args->agbno = NULLAGBLOCK;
1563 return 0;
2bd0ea18
NS
1564}
1565
1566/*
1567 * Deal with the case where only small freespaces remain.
1568 * Either return the contents of the last freespace record,
1569 * or allocate space from the freelist if there is nothing in the tree.
1570 */
1571STATIC int /* error */
1572xfs_alloc_ag_vextent_small(
dfc130f3
RC
1573 xfs_alloc_arg_t *args, /* allocation argument structure */
1574 xfs_btree_cur_t *ccur, /* by-size cursor */
1575 xfs_agblock_t *fbnop, /* result block number */
1576 xfs_extlen_t *flenp, /* result length */
2bd0ea18
NS
1577 int *stat) /* status: 0-freelist, 1-normal/none */
1578{
59b86360 1579 struct xfs_owner_info oinfo;
2bd0ea18
NS
1580 int error;
1581 xfs_agblock_t fbno;
1582 xfs_extlen_t flen;
2bd0ea18
NS
1583 int i;
1584
b194c7d8 1585 if ((error = xfs_btree_decrement(ccur, 0, &i)))
2bd0ea18
NS
1586 goto error0;
1587 if (i) {
0e266570 1588 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
2bd0ea18 1589 goto error0;
19ebedcf 1590 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
2bd0ea18
NS
1591 }
1592 /*
1593 * Nothing in the btree, try the freelist. Make sure
1594 * to respect minleft even when pulling from the
1595 * freelist.
1596 */
cf8ce220 1597 else if (args->minlen == 1 && args->alignment == 1 &&
9760cac2 1598 args->resv != XFS_AG_RESV_AGFL &&
6e3140c7
NS
1599 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1600 > args->minleft)) {
cdded3d8
DC
1601 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1602 if (error)
2bd0ea18
NS
1603 goto error0;
1604 if (fbno != NULLAGBLOCK) {
a2ceac1f 1605 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
1fccd5c8 1606 xfs_alloc_allow_busy_reuse(args->datatype));
a2ceac1f 1607
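			/*
			 * An AGFL block may still have a stale metadata
			 * buffer attached (e.g. from a freed btree block);
			 * invalidate it before handing the block out as
			 * user data.
			 */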
1fccd5c8 1608 if (xfs_alloc_is_userdata(args->datatype)) {
2bd0ea18
NS
1609 xfs_buf_t *bp;
1610
1611 bp = xfs_btree_get_bufs(args->mp, args->tp,
1612 args->agno, fbno, 0);
b2284d05
ES
1613 if (!bp) {
1614 error = -EFSCORRUPTED;
1615 goto error0;
1616 }
2bd0ea18 1617 xfs_trans_binval(args->tp, bp);
2bd0ea18
NS
1618 }
1619 args->len = 1;
1620 args->agbno = fbno;
19ebedcf 1621 XFS_WANT_CORRUPTED_GOTO(args->mp,
2bd0ea18 1622 args->agbno + args->len <=
6e3140c7 1623 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
2bd0ea18
NS
1624 error0);
1625 args->wasfromfl = 1;
56b2de80 1626 trace_xfs_alloc_small_freelist(args);
59b86360
DW
1627
1628 /*
1629 * If we're feeding an AGFL block to something that
1630 * doesn't live in the free space, we need to clear
9760cac2 1631 * out the OWN_AG rmap.
1632 */
1633 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
1634 error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1635 fbno, 1, &oinfo);
1636 if (error)
1637 goto error0;
59b86360 1638
1639 *stat = 0;
1640 return 0;
1641 }
1642 /*
1643 * Nothing in the freelist.
1644 */
1645 else
1646 flen = 0;
1647 }
1648 /*
1649 * Can't allocate from the freelist for some reason.
1650 */
1651 else {
1652 fbno = NULLAGBLOCK;
2bd0ea18 1653 flen = 0;
5e656dbb 1654 }
1655 /*
1656 * Can't do the allocation, give up.
1657 */
1658 if (flen < args->minlen) {
1659 args->agbno = NULLAGBLOCK;
56b2de80 1660 trace_xfs_alloc_small_notenough(args);
1661 flen = 0;
1662 }
1663 *fbnop = fbno;
1664 *flenp = flen;
1665 *stat = 1;
56b2de80 1666 trace_xfs_alloc_small_done(args);
1667 return 0;
1668
1669error0:
56b2de80 1670 trace_xfs_alloc_small_error(args);
1671 return error;
1672}
1673
1674/*
1675 * Free the extent starting at agno/bno for length.
1676 */
85aec44f 1677STATIC int
2bd0ea18 1678xfs_free_ag_extent(
1679 xfs_trans_t *tp,
1680 xfs_buf_t *agbp,
1681 xfs_agnumber_t agno,
1682 xfs_agblock_t bno,
1683 xfs_extlen_t len,
1684 struct xfs_owner_info *oinfo,
cf8ce220 1685 enum xfs_ag_resv_type type)
2bd0ea18 1686{
1687 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1688 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
2bd0ea18 1689 int error; /* error return value */
1690 xfs_agblock_t gtbno; /* start of right neighbor block */
1691 xfs_extlen_t gtlen; /* length of right neighbor block */
1692 int haveleft; /* have a left neighbor block */
1693 int haveright; /* have a right neighbor block */
1694 int i; /* temp, result code */
1695 xfs_agblock_t ltbno; /* start of left neighbor block */
1696 xfs_extlen_t ltlen; /* length of left neighbor block */
1697 xfs_mount_t *mp; /* mount point struct for filesystem */
1698 xfs_agblock_t nbno; /* new starting block of freespace */
1699 xfs_extlen_t nlen; /* new length of freespace */
a2ceac1f 1700 xfs_perag_t *pag; /* per allocation group data */
2bd0ea18 1701
631ac87a 1702 bno_cur = cnt_cur = NULL;
2bd0ea18 1703 mp = tp->t_mountp;
631ac87a 1704
3ee858aa 1705 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1706 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1707 if (error)
1708 goto error0;
1709 }
1710
5000d01d 1711 /*
1712 * Allocate and initialize a cursor for the by-block btree.
1713 */
b194c7d8 1714 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
5000d01d 1715 /*
1716 * Look for a neighboring block on the left (lower block numbers)
1717 * that is contiguous with this space.
1718 */
0e266570 1719 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1720 goto error0;
1721 if (haveleft) {
1722 /*
1723 * There is a block to our left.
1724 */
0e266570 1725 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
2bd0ea18 1726 goto error0;
19ebedcf 1727 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1728 /*
1729 * It's not contiguous, though.
1730 */
1731 if (ltbno + ltlen < bno)
1732 haveleft = 0;
1733 else {
1734 /*
1735 * If this failure happens the request to free this
1736 * space was invalid, it's (partly) already free.
1737 * Very bad.
1738 */
1739 XFS_WANT_CORRUPTED_GOTO(mp,
1740 ltbno + ltlen <= bno, error0);
1741 }
1742 }
5000d01d 1743 /*
1744 * Look for a neighboring block on the right (higher block numbers)
1745 * that is contiguous with this space.
1746 */
b194c7d8 1747 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1748 goto error0;
1749 if (haveright) {
1750 /*
1751 * There is a block to our right.
1752 */
0e266570 1753 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
2bd0ea18 1754 goto error0;
19ebedcf 1755 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1756 /*
1757 * It's not contiguous, though.
1758 */
1759 if (bno + len < gtbno)
1760 haveright = 0;
1761 else {
1762 /*
1763 * If this failure happens the request to free this
1764 * space was invalid, it's (partly) already free.
1765 * Very bad.
1766 */
19ebedcf 1767 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1768 }
1769 }
1770 /*
1771 * Now allocate and initialize a cursor for the by-size tree.
1772 */
b194c7d8 1773 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1774 /*
1775 * Have both left and right contiguous neighbors.
1776 * Merge all three into a single free block.
1777 */
1778 if (haveleft && haveright) {
1779 /*
1780 * Delete the old by-size entry on the left.
1781 */
0e266570 1782 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2bd0ea18 1783 goto error0;
19ebedcf 1784 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
b194c7d8 1785 if ((error = xfs_btree_delete(cnt_cur, &i)))
2bd0ea18 1786 goto error0;
19ebedcf 1787 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1788 /*
1789 * Delete the old by-size entry on the right.
1790 */
0e266570 1791 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2bd0ea18 1792 goto error0;
19ebedcf 1793 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
b194c7d8 1794 if ((error = xfs_btree_delete(cnt_cur, &i)))
2bd0ea18 1795 goto error0;
19ebedcf 1796 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1797 /*
1798 * Delete the old by-block entry for the right block.
1799 */
b194c7d8 1800 if ((error = xfs_btree_delete(bno_cur, &i)))
2bd0ea18 1801 goto error0;
19ebedcf 1802 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1803 /*
1804 * Move the by-block cursor back to the left neighbor.
1805 */
b194c7d8 1806 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2bd0ea18 1807 goto error0;
19ebedcf 1808 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1809#ifdef DEBUG
1810 /*
1811 * Check that this is the right record: delete didn't
1812 * mangle the cursor.
1813 */
1814 {
1815 xfs_agblock_t xxbno;
1816 xfs_extlen_t xxlen;
1817
1818 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1819 &i)))
2bd0ea18 1820 goto error0;
19ebedcf 1821 XFS_WANT_CORRUPTED_GOTO(mp,
1822 i == 1 && xxbno == ltbno && xxlen == ltlen,
1823 error0);
1824 }
1825#endif
1826 /*
1827 * Update remaining by-block entry to the new, joined block.
1828 */
1829 nbno = ltbno;
1830 nlen = len + ltlen + gtlen;
0e266570 1831 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1832 goto error0;
1833 }
1834 /*
1835 * Have only a left contiguous neighbor.
1836 * Merge it together with the new freespace.
1837 */
1838 else if (haveleft) {
1839 /*
1840 * Delete the old by-size entry on the left.
1841 */
0e266570 1842 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2bd0ea18 1843 goto error0;
19ebedcf 1844 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
b194c7d8 1845 if ((error = xfs_btree_delete(cnt_cur, &i)))
2bd0ea18 1846 goto error0;
19ebedcf 1847 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1848 /*
1849 * Back up the by-block cursor to the left neighbor, and
1850 * update its length.
1851 */
b194c7d8 1852 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2bd0ea18 1853 goto error0;
19ebedcf 1854 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1855 nbno = ltbno;
1856 nlen = len + ltlen;
0e266570 1857 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1858 goto error0;
1859 }
1860 /*
1861 * Have only a right contiguous neighbor.
1862 * Merge it together with the new freespace.
1863 */
1864 else if (haveright) {
1865 /*
1866 * Delete the old by-size entry on the right.
1867 */
0e266570 1868 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2bd0ea18 1869 goto error0;
19ebedcf 1870 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
b194c7d8 1871 if ((error = xfs_btree_delete(cnt_cur, &i)))
2bd0ea18 1872 goto error0;
19ebedcf 1873 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
2bd0ea18 1874 /*
5000d01d 1875 * Update the starting block and length of the right
1876 * neighbor in the by-block tree.
1877 */
1878 nbno = bno;
1879 nlen = len + gtlen;
0e266570 1880 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1881 goto error0;
1882 }
1883 /*
1884 * No contiguous neighbors.
1885 * Insert the new freespace into the by-block tree.
1886 */
1887 else {
1888 nbno = bno;
1889 nlen = len;
b194c7d8 1890 if ((error = xfs_btree_insert(bno_cur, &i)))
2bd0ea18 1891 goto error0;
19ebedcf 1892 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1893 }
1894 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1895 bno_cur = NULL;
1896 /*
1897 * In all cases we need to insert the new freespace in the by-size tree.
1898 */
0e266570 1899 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2bd0ea18 1900 goto error0;
19ebedcf 1901 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
b194c7d8 1902 if ((error = xfs_btree_insert(cnt_cur, &i)))
2bd0ea18 1903 goto error0;
19ebedcf 1904 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1905 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1906 cnt_cur = NULL;
a2ceac1f 1907
1908 /*
1909 * Update the freespace totals in the ag and superblock.
1910 */
1911 pag = xfs_perag_get(mp, agno);
1912 error = xfs_alloc_update_counters(tp, pag, agbp, len);
cf8ce220 1913 xfs_ag_resv_free_extent(pag, type, tp, len);
1914 xfs_perag_put(pag);
1915 if (error)
1916 goto error0;
1917
1918 XFS_STATS_INC(mp, xs_freex);
1919 XFS_STATS_ADD(mp, xs_freeb, len);
56b2de80 1920
65a15e06 1921 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
3e535bba 1922
1923 return 0;
1924
1925 error0:
65a15e06 1926 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
1927 if (bno_cur)
1928 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1929 if (cnt_cur)
1930 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1931 return error;
1932}
1933
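/*
 * Worked example (editor's note, not from the original source): freeing the
 * 8-block extent at agbno 100 while the by-block tree already holds records
 * [90, 10] and [108, 12] takes the "haveleft && haveright" branch above:
 * both old by-size records and the right-hand by-block record are deleted,
 * and the surviving left-hand record is rewritten as [90, 30].
 */
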
5000d01d 1934/*
1935 * Visible (exported) allocation/free functions.
1936 * Some of these are used just by xfs_alloc_btree.c and this file.
1937 */
1938
1939/*
1940 * Compute and fill in value of m_ag_maxlevels.
1941 */
1942void
1943xfs_alloc_compute_maxlevels(
1944 xfs_mount_t *mp) /* file system mount structure */
1945{
1946 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
1947 (mp->m_sb.sb_agblocks + 1) / 2);
1948}
1949
56b2de80 1950/*
1951 * Find the length of the longest extent in an AG. The 'need' parameter
1952 * specifies how much space we're going to need for the AGFL and the
1953 * 'reserved' parameter tells us how many blocks in this AG are reserved for
1954 * other callers.
1955 */
1956xfs_extlen_t
1957xfs_alloc_longest_free_extent(
1958 struct xfs_mount *mp,
72bda06d 1959 struct xfs_perag *pag,
1960 xfs_extlen_t need,
1961 xfs_extlen_t reserved)
56b2de80 1962{
72bda06d 1963 xfs_extlen_t delta = 0;
56b2de80 1964
1965 /*
1966 * If the AGFL needs a recharge, we'll have to subtract that from the
1967 * longest extent.
1968 */
1969 if (need > pag->pagf_flcount)
1970 delta = need - pag->pagf_flcount;
1971
1972 /*
1973 * If we cannot maintain others' reservations with space from the
1974 * not-longest freesp extents, we'll have to subtract /that/ from
1975 * the longest extent too.
1976 */
1977 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1978 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1979
1980 /*
1981 * If the longest extent is long enough to satisfy all the
1982 * reservations and AGFL rules in place, we can return this extent.
1983 */
1984 if (pag->pagf_longest > delta)
1985 return pag->pagf_longest - delta;
1986
1987 /* Otherwise, let the caller try for 1 block if there's space. */
1988 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1989}
1990
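/*
 * Worked example (editor's note, not from the original source): with
 * pagf_flcount = 4, need = 10, pagf_freeblks = 100, pagf_longest = 60 and
 * reserved = 50, the AGFL top-up costs delta = 10 - 4 = 6, and the
 * non-longest free space (100 - 60 = 40 blocks) is 10 blocks short of the
 * reservation, so delta grows to 16 and the usable longest extent reported
 * here is 60 - 16 = 44 blocks.
 */
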
1991unsigned int
1992xfs_alloc_min_freelist(
1993 struct xfs_mount *mp,
1994 struct xfs_perag *pag)
1995{
1996 unsigned int min_free;
1997
1998 /* space needed by-bno freespace btree */
1999 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2000 mp->m_ag_maxlevels);
2001 /* space needed by-size freespace btree */
2002 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2003 mp->m_ag_maxlevels);
2004 /* space needed reverse mapping used space btree */
2005 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2006 min_free += min_t(unsigned int,
2007 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2008 mp->m_rmap_maxlevels);
2009
2010 return min_free;
2011}
2012
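/*
 * Worked example (editor's note, not from the original source): with a
 * 2-level bnobt, a 2-level cntbt and a 3-level rmapbt (and generous
 * m_ag_maxlevels/m_rmap_maxlevels caps), the freelist must hold at least
 * (2 + 1) + (2 + 1) + (3 + 1) = 10 blocks to absorb a worst-case split of
 * each of these btrees.
 */
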
2013/*
2014 * Check if the operation we are fixing up the freelist for should go ahead or
2015 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2016 * is dependent on whether the size and shape of free space available will
2017 * permit the requested allocation to take place.
2018 */
2019static bool
2020xfs_alloc_space_available(
2021 struct xfs_alloc_arg *args,
2022 xfs_extlen_t min_free,
2023 int flags)
2024{
2025 struct xfs_perag *pag = args->pag;
3fe4a6dd 2026 xfs_extlen_t alloc_len, longest;
cf8ce220 2027 xfs_extlen_t reservation; /* blocks that are still reserved */
2028 int available;
2029
2030 if (flags & XFS_ALLOC_FLAG_FREEING)
2031 return true;
2032
2033 reservation = xfs_ag_resv_needed(pag, args->resv);
2034
5515b7c1 2035 /* do we have enough contiguous free space for the allocation? */
3fe4a6dd 2036 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2037 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
2038 reservation);
3fe4a6dd 2039 if (longest < alloc_len)
2040 return false;
2041
cf8ce220 2042 /* do we have enough free space remaining for the allocation? */
5515b7c1 2043 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2c003dc2 2044 reservation - min_free - args->minleft);
3fe4a6dd 2045 if (available < (int)max(args->total, alloc_len))
2046 return false;
2047
2048 /*
2049 * Clamp maxlen to the amount of free space available for the actual
2050 * extent allocation.
2051 */
2052 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2053 args->maxlen = available;
2054 ASSERT(args->maxlen > 0);
2055 ASSERT(args->maxlen >= args->minlen);
2056 }
2057
2058 return true;
2059}
2060
2061/*
2062 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2063 * is to detect an agfl header padding mismatch between current and early v5
2064 * kernels. This problem manifests as a 1-slot size difference between the
2065 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2066 * may also catch variants of agfl count corruption unrelated to padding. Either
2067 * way, we'll reset the agfl and warn the user.
2068 *
2069 * Return true if a reset is required before the agfl can be used, false
2070 * otherwise.
2071 */
2072static bool
2073xfs_agfl_needs_reset(
2074 struct xfs_mount *mp,
2075 struct xfs_agf *agf)
2076{
2077 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2078 uint32_t l = be32_to_cpu(agf->agf_fllast);
2079 uint32_t c = be32_to_cpu(agf->agf_flcount);
2080 int agfl_size = xfs_agfl_size(mp);
2081 int active;
2082
2083 /* no agfl header on v4 supers */
2084 if (!xfs_sb_version_hascrc(&mp->m_sb))
2085 return false;
2086
2087 /*
2088 * The agf read verifier catches severe corruption of these fields.
2089 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2090 * the verifier allows it.
2091 */
2092 if (f >= agfl_size || l >= agfl_size)
2093 return true;
2094 if (c > agfl_size)
2095 return true;
2096
2097 /*
2098 * Check consistency between the on-disk count and the active range. An
2099 * agfl padding mismatch manifests as an inconsistent flcount.
2100 */
2101 if (c && l >= f)
2102 active = l - f + 1;
2103 else if (c)
2104 active = agfl_size - f + l + 1;
2105 else
2106 active = 0;
2107
2108 return active != c;
2109}
2110
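/*
 * Worked example (editor's note, not from the original source): assume
 * xfs_agfl_size() is 119 slots. A wrapped but healthy AGFL with
 * flfirst = 115 and fllast = 2 covers 119 - 115 + 2 + 1 = 7 active slots,
 * matching an on-disk flcount of 7. The padding mismatch described above
 * leaves flcount one slot out of step with the [first, last] range, so it
 * would read 6 or 8 here and xfs_agfl_needs_reset() returns true.
 */
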
2111/*
2112 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2113 * agfl content cannot be trusted. Warn the user that a repair is required to
2114 * recover leaked blocks.
2115 *
2116 * The purpose of this mechanism is to handle filesystems affected by the agfl
2117 * header padding mismatch problem. A reset keeps the filesystem online with a
2118 * relatively minor free space accounting inconsistency rather than suffer the
2119 * inevitable crash from use of an invalid agfl block.
2120 */
2121static void
2122xfs_agfl_reset(
2123 struct xfs_trans *tp,
2124 struct xfs_buf *agbp,
2125 struct xfs_perag *pag)
2126{
2127 struct xfs_mount *mp = tp->t_mountp;
2128 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
2129
2130 ASSERT(pag->pagf_agflreset);
2131 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2132
2133 xfs_warn(mp,
2134 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2135 "Please unmount and run xfs_repair.",
2136 pag->pag_agno, pag->pagf_flcount);
2137
2138 agf->agf_flfirst = 0;
2139 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2140 agf->agf_flcount = 0;
2141 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2142 XFS_AGF_FLCOUNT);
2143
2144 pag->pagf_flcount = 0;
2145 pag->pagf_agflreset = false;
2146}
2147
2148/*
2149 * Decide whether to use this allocation group for this allocation.
2150 * If so, fix up the btree freelist's size.
2bd0ea18 2151 */
ff105f75 2152int /* error */
2bd0ea18 2153xfs_alloc_fix_freelist(
2154 struct xfs_alloc_arg *args, /* allocation argument structure */
2155 int flags) /* XFS_ALLOC_FLAG_... */
2bd0ea18 2156{
2157 struct xfs_mount *mp = args->mp;
2158 struct xfs_perag *pag = args->pag;
2159 struct xfs_trans *tp = args->tp;
2160 struct xfs_buf *agbp = NULL;
2161 struct xfs_buf *agflbp = NULL;
2162 struct xfs_alloc_arg targs; /* local allocation arguments */
2163 xfs_agblock_t bno; /* freelist block */
2164 xfs_extlen_t need; /* total blocks needed in freelist */
fcdd428c 2165 int error = 0;
c98e644e 2166
2bd0ea18 2167 if (!pag->pagf_init) {
2168 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2169 if (error)
2170 goto out_no_agbp;
2bd0ea18 2171 if (!pag->pagf_init) {
2172 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2173 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
c98e644e 2174 goto out_agbp_relse;
2bd0ea18 2175 }
c98e644e 2176 }
34317449 2177
5e656dbb 2178 /*
2179	 * If this is a metadata-preferred pag and we are allocating user data,
2180	 * try somewhere else unless we are being asked to try harder at this
2181	 * point.
34317449 2182 */
1fccd5c8 2183 if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2184 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2185 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
c98e644e 2186 goto out_agbp_relse;
2187 }
2188
de046644 2189 need = xfs_alloc_min_freelist(mp, pag);
2190 if (!xfs_alloc_space_available(args, need, flags |
2191 XFS_ALLOC_FLAG_CHECK))
c98e644e 2192 goto out_agbp_relse;
5e656dbb 2193
2194 /*
2195 * Get the a.g. freespace buffer.
2196 * Can fail if we're not blocking on locks, and it's held.
2197 */
2198 if (!agbp) {
2199 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2200 if (error)
2201 goto out_no_agbp;
2202 if (!agbp) {
2203 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2204 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
c98e644e 2205 goto out_no_agbp;
2206 }
2207 }
72bda06d 2208
2209 /* reset a padding mismatched agfl before final free space check */
2210 if (pag->pagf_agflreset)
2211 xfs_agfl_reset(tp, agbp, pag);
2212
72bda06d 2213 /* If there isn't enough total space or a large enough single extent, reject it. */
de046644 2214 need = xfs_alloc_min_freelist(mp, pag);
2215 if (!xfs_alloc_space_available(args, need, flags))
2216 goto out_agbp_relse;
5515b7c1 2217
2218 /*
2219 * Make the freelist shorter if it's too long.
72bda06d 2220 *
2221 * Note that from this point onwards, we will always release the agf and
2222 * agfl buffers on error. This handles the case where we error out and
2223 * the buffers are clean or may not have been joined to the transaction
2224 * and hence need to be released manually. If they have been joined to
2225 * the transaction, then xfs_trans_brelse() will handle them
2226 * appropriately based on the recursion count and dirty state of the
2227 * buffer.
2228 *
2229 * XXX (dgc): When we have lots of free space, does this buy us
2230 * anything other than extra overhead when we need to put more blocks
2231 * back on the free list? Maybe we should only do this when space is
2232 * getting low or the AGFL is more than half full?
2233 *
2234 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2235 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2236 * updating the rmapbt. Both flags are used in xfs_repair while we're
2237 * rebuilding the rmapbt, and neither are used by the kernel. They're
2238 * both required to ensure that rmaps are correctly recorded for the
2239 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2240 * repair/rmap.c in xfsprogs for details.
2bd0ea18 2241 */
2242 memset(&targs, 0, sizeof(targs));
2243 if (flags & XFS_ALLOC_FLAG_NORMAP)
2244 xfs_rmap_skip_owner_update(&targs.oinfo);
2245 else
2246 xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2247 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
72bda06d 2248 struct xfs_buf *bp;
2bd0ea18 2249
2250 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2251 if (error)
c98e644e 2252 goto out_agbp_relse;
85aec44f 2253 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
9760cac2 2254 &targs.oinfo, XFS_AG_RESV_AGFL);
72bda06d 2255 if (error)
c98e644e 2256 goto out_agbp_relse;
2bd0ea18 2257 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2258 if (!bp) {
2259 error = -EFSCORRUPTED;
2260 goto out_agbp_relse;
2261 }
2bd0ea18 2262 xfs_trans_binval(tp, bp);
2bd0ea18 2263 }
72bda06d 2264
2265 targs.tp = tp;
2266 targs.mp = mp;
2267 targs.agbp = agbp;
2268 targs.agno = args->agno;
cf8ce220 2269 targs.alignment = targs.minlen = targs.prod = 1;
2270 targs.type = XFS_ALLOCTYPE_THIS_AG;
2271 targs.pag = pag;
2272 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2273 if (error)
c98e644e 2274 goto out_agbp_relse;
2275
2276 /* Make the freelist longer if it's too short. */
2277 while (pag->pagf_flcount < need) {
2bd0ea18 2278 targs.agbno = 0;
72bda06d 2279 targs.maxlen = need - pag->pagf_flcount;
9760cac2 2280 targs.resv = XFS_AG_RESV_AGFL;
2281
2282 /* Allocate as many blocks as possible at once. */
2283 error = xfs_alloc_ag_vextent(&targs);
2284 if (error)
2285 goto out_agflbp_relse;
2286
2bd0ea18 2287 /*
2288 * Stop if we run out. Won't happen if callers are obeying
2289 * the restrictions correctly. Can happen for free calls
2290 * on a completely full ag.
2291 */
2292 if (targs.agbno == NULLAGBLOCK) {
2293 if (flags & XFS_ALLOC_FLAG_FREEING)
2294 break;
c98e644e 2295 goto out_agflbp_relse;
5e656dbb 2296 }
2297 /*
2298 * Put each allocated block on the list.
2299 */
2300 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2301 error = xfs_alloc_put_freelist(tp, agbp,
2302 agflbp, bno, 0);
2303 if (error)
c98e644e 2304 goto out_agflbp_relse;
2305 }
2306 }
cb4deb22 2307 xfs_trans_brelse(tp, agflbp);
2308 args->agbp = agbp;
2309 return 0;
2310
2311out_agflbp_relse:
2312 xfs_trans_brelse(tp, agflbp);
2313out_agbp_relse:
2314 if (agbp)
2315 xfs_trans_brelse(tp, agbp);
2316out_no_agbp:
2317 args->agbp = NULL;
2318 return error;
2319}
2320
2321/*
2322 * Get a block from the freelist.
2323 * Returns with the buffer for the block gotten.
2324 */
2325int /* error */
2326xfs_alloc_get_freelist(
2327 xfs_trans_t *tp, /* transaction pointer */
2328 xfs_buf_t *agbp, /* buffer containing the agf structure */
2329 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2330 int btreeblk) /* destination is an AGF btree */
2331{
2332 xfs_agf_t *agf; /* a.g. freespace structure */
2333 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2334 xfs_agblock_t bno; /* block number returned */
dd5b876e 2335 __be32 *agfl_bno;
2bd0ea18 2336 int error;
cdded3d8 2337 int logflags;
dd5b876e 2338 xfs_mount_t *mp = tp->t_mountp;
2339 xfs_perag_t *pag; /* per allocation group data */
2340
2341 /*
2342 * Freelist is empty, give up.
2343 */
dd5b876e 2344 agf = XFS_BUF_TO_AGF(agbp);
46eca962 2345 if (!agf->agf_flcount) {
2346 *bnop = NULLAGBLOCK;
2347 return 0;
2348 }
2349 /*
2350 * Read the array of free blocks.
2351 */
2352 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2353 &agflbp);
2354 if (error)
2bd0ea18 2355 return error;
2356
2357
2358 /*
2359 * Get the block number and update the data structures.
2360 */
2361 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2362 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
5e656dbb 2363 be32_add_cpu(&agf->agf_flfirst, 1);
2bd0ea18 2364 xfs_trans_brelse(tp, agflbp);
b8165508 2365 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
46eca962 2366 agf->agf_flfirst = 0;
2367
2368 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
8dbee8f5 2369 ASSERT(!pag->pagf_agflreset);
5e656dbb 2370 be32_add_cpu(&agf->agf_flcount, -1);
2371 xfs_trans_agflist_delta(tp, -1);
2372 pag->pagf_flcount--;
56b2de80 2373 xfs_perag_put(pag);
2374
2375 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2376 if (btreeblk) {
5e656dbb 2377 be32_add_cpu(&agf->agf_btreeblks, 1);
2378 pag->pagf_btreeblks++;
2379 logflags |= XFS_AGF_BTREEBLKS;
2380 }
2381
cdded3d8 2382 xfs_alloc_log_agf(tp, agbp, logflags);
2bd0ea18 2383 *bnop = bno;
3e535bba 2384
2385 return 0;
2386}
2387
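/*
 * Illustrative note (editor's addition, not from the original source): the
 * AGFL is a circular array, so with xfs_agfl_size() == 119 a get that
 * consumes slot 118 advances agf_flfirst past the end of the array and
 * xfs_alloc_get_freelist() wraps it back to slot 0.
 */
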
2388/*
2389 * Log the given fields from the agf structure.
2390 */
2391void
2392xfs_alloc_log_agf(
2393 xfs_trans_t *tp, /* transaction pointer */
2394 xfs_buf_t *bp, /* buffer for a.g. freelist header */
dfc130f3 2395 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2396{
2397 int first; /* first byte offset */
2398 int last; /* last byte offset */
2399 static const short offsets[] = {
2400 offsetof(xfs_agf_t, agf_magicnum),
2401 offsetof(xfs_agf_t, agf_versionnum),
2402 offsetof(xfs_agf_t, agf_seqno),
2403 offsetof(xfs_agf_t, agf_length),
2404 offsetof(xfs_agf_t, agf_roots[0]),
2405 offsetof(xfs_agf_t, agf_levels[0]),
2406 offsetof(xfs_agf_t, agf_flfirst),
2407 offsetof(xfs_agf_t, agf_fllast),
2408 offsetof(xfs_agf_t, agf_flcount),
2409 offsetof(xfs_agf_t, agf_freeblks),
2410 offsetof(xfs_agf_t, agf_longest),
cdded3d8 2411 offsetof(xfs_agf_t, agf_btreeblks),
dd5b876e 2412 offsetof(xfs_agf_t, agf_uuid),
8511b71a 2413 offsetof(xfs_agf_t, agf_rmap_blocks),
2414 offsetof(xfs_agf_t, agf_refcount_blocks),
2415 offsetof(xfs_agf_t, agf_refcount_root),
2416 offsetof(xfs_agf_t, agf_refcount_level),
2417 /* needed so that we don't log the whole rest of the structure: */
2418 offsetof(xfs_agf_t, agf_spare64),
2419 sizeof(xfs_agf_t)
2420 };
2421
2422 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2423
bdc16ee5 2424 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
dd5b876e 2425
2426 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2427 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2428}
2429
2430/*
2431 * Interface for inode allocation to force the pag data to be initialized.
2432 */
2433int /* error */
2434xfs_alloc_pagf_init(
2435 xfs_mount_t *mp, /* file system mount structure */
2436 xfs_trans_t *tp, /* transaction pointer */
2437 xfs_agnumber_t agno, /* allocation group number */
2438 int flags) /* XFS_ALLOC_FLAGS_... */
2439{
7a3bffe4 2440 xfs_buf_t *bp;
2441 int error;
2442
0e266570 2443 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2444 return error;
2445 if (bp)
2446 xfs_trans_brelse(tp, bp);
2447 return 0;
2448}
2449
2450/*
2451 * Put the block on the freelist for the allocation group.
2452 */
2453int /* error */
2454xfs_alloc_put_freelist(
2455 xfs_trans_t *tp, /* transaction pointer */
2456 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2457 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2458 xfs_agblock_t bno, /* block being freed */
2459 int btreeblk) /* block came from an AGF btree */
2460{
2461 xfs_agf_t *agf; /* a.g. freespace structure */
5e656dbb 2462 __be32 *blockp;/* pointer to array entry */
2bd0ea18 2463 int error;
cdded3d8 2464 int logflags;
2465 xfs_mount_t *mp; /* mount structure */
2466 xfs_perag_t *pag; /* per allocation group data */
2467 __be32 *agfl_bno;
2468 int startoff;
2469
2470 agf = XFS_BUF_TO_AGF(agbp);
2471 mp = tp->t_mountp;
2472
2473 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
6e3140c7 2474 be32_to_cpu(agf->agf_seqno), &agflbp)))
2bd0ea18 2475 return error;
5e656dbb 2476 be32_add_cpu(&agf->agf_fllast, 1);
b8165508 2477 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
46eca962 2478 agf->agf_fllast = 0;
2479
2480 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
8dbee8f5 2481 ASSERT(!pag->pagf_agflreset);
5e656dbb 2482 be32_add_cpu(&agf->agf_flcount, 1);
2483 xfs_trans_agflist_delta(tp, 1);
2484 pag->pagf_flcount++;
2485
2486 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2487 if (btreeblk) {
5e656dbb 2488 be32_add_cpu(&agf->agf_btreeblks, -1);
2489 pag->pagf_btreeblks--;
2490 logflags |= XFS_AGF_BTREEBLKS;
2491 }
56b2de80 2492 xfs_perag_put(pag);
cdded3d8 2493
2494 xfs_alloc_log_agf(tp, agbp, logflags);
2495
b8165508 2496 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2497
2498 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2499 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
5e656dbb 2500 *blockp = cpu_to_be32(bno);
2501 startoff = (char *)blockp - (char *)agflbp->b_addr;
2502
cdded3d8 2503 xfs_alloc_log_agf(tp, agbp, logflags);
dd5b876e 2504
bdc16ee5 2505 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2506 xfs_trans_log_buf(tp, agflbp, startoff,
2507 startoff + sizeof(xfs_agblock_t) - 1);
2508 return 0;
2509}
2510
bc01119d 2511static xfs_failaddr_t
a2ceac1f 2512xfs_agf_verify(
2513 struct xfs_buf *bp)
2514{
2515 struct xfs_mount *mp = bp->b_target->bt_mount;
2516 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
a2ceac1f 2517
2518 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2519 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
bc01119d 2520 return __this_address;
2521 if (!xfs_log_check_lsn(mp,
2522 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
bc01119d 2523 return __this_address;
a65d8d29 2524 }
a2ceac1f 2525
2526 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2527 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2528 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2529 be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2530 be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2531 be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
bc01119d 2532 return __this_address;
a2ceac1f 2533
2534 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2535 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2536 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
5a35bf2c 2537 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
bc01119d 2538 return __this_address;
5a35bf2c 2539
e37838e5 2540 if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2541 (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2542 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
bc01119d 2543 return __this_address;
e37838e5 2544
2545 /*
2546 * during growfs operations, the perag is not fully initialised,
2547 * so we can't use it for any useful checking. growfs ensures we can't
2548 * use it by using uncached buffers that don't have the perag attached
2549 * so we can detect and avoid this problem.
2550 */
dd5b876e 2551 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
bc01119d 2552 return __this_address;
a2ceac1f 2553
2554 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2555 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
bc01119d 2556 return __this_address;
dd5b876e 2557
88ce0792 2558 if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2559 (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2560 be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
bc01119d 2561 return __this_address;
88ce0792 2562
bc01119d 2563 return NULL;
a2ceac1f 2564
2565}
2566
2567static void
2568xfs_agf_read_verify(
2569 struct xfs_buf *bp)
2570{
dd5b876e 2571 struct xfs_mount *mp = bp->b_target->bt_mount;
1e697959 2572 xfs_failaddr_t fa;
dd5b876e 2573
2574 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2575 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2576 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2577 else {
95d9582b 2578 fa = xfs_agf_verify(bp);
2579 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2580 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2581 }
2582}
2583
2584static void
2585xfs_agf_write_verify(
2586 struct xfs_buf *bp)
2587{
2588 struct xfs_mount *mp = bp->b_target->bt_mount;
2589 struct xfs_buf_log_item *bip = bp->b_log_item;
1e697959 2590 xfs_failaddr_t fa;
dd5b876e 2591
95d9582b 2592 fa = xfs_agf_verify(bp);
2593 if (fa) {
2594 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2595 return;
2596 }
2597
2598 if (!xfs_sb_version_hascrc(&mp->m_sb))
2599 return;
2600
2601 if (bip)
2602 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2603
43b5aeed 2604 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2605}
2606
2607const struct xfs_buf_ops xfs_agf_buf_ops = {
a3fac935 2608 .name = "xfs_agf",
2609 .verify_read = xfs_agf_read_verify,
2610 .verify_write = xfs_agf_write_verify,
95d9582b 2611 .verify_struct = xfs_agf_verify,
2612};
2613
2614/*
2615 * Read in the allocation group header (free/alloc section).
2616 */
2617int /* error */
2618xfs_read_agf(
2619 struct xfs_mount *mp, /* mount point structure */
2620 struct xfs_trans *tp, /* transaction pointer */
2621 xfs_agnumber_t agno, /* allocation group number */
2622 int flags, /* XFS_BUF_ */
2623 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2bd0ea18 2624{
9440d84d 2625 int error;
2bd0ea18 2626
2627 trace_xfs_read_agf(mp, agno);
2628
2bd0ea18 2629 ASSERT(agno != NULLAGNUMBER);
2630 error = xfs_trans_read_buf(
2631 mp, tp, mp->m_ddev_targp,
2632 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
a2ceac1f 2633 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
9440d84d 2634 if (error)
2bd0ea18 2635 return error;
56b2de80 2636 if (!*bpp)
2bd0ea18 2637 return 0;
56b2de80 2638
2639 ASSERT(!(*bpp)->b_error);
2640 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2641 return 0;
2642}
2643
2644/*
2645 * Read in the allocation group header (free/alloc section).
2646 */
2647int /* error */
2648xfs_alloc_read_agf(
2649 struct xfs_mount *mp, /* mount point structure */
2650 struct xfs_trans *tp, /* transaction pointer */
2651 xfs_agnumber_t agno, /* allocation group number */
2652 int flags, /* XFS_ALLOC_FLAG_... */
2653 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2654{
2655 struct xfs_agf *agf; /* ag freelist header */
2656 struct xfs_perag *pag; /* per allocation group data */
2657 int error;
2658
ff105f75 2659 trace_xfs_alloc_read_agf(mp, agno);
56b2de80 2660
ff105f75 2661 ASSERT(agno != NULLAGNUMBER);
2662 error = xfs_read_agf(mp, tp, agno,
2663 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2664 bpp);
2665 if (error)
2666 return error;
2667 if (!*bpp)
2668 return 0;
a2ceac1f 2669 ASSERT(!(*bpp)->b_error);
2670
2671 agf = XFS_BUF_TO_AGF(*bpp);
2672 pag = xfs_perag_get(mp, agno);
2bd0ea18 2673 if (!pag->pagf_init) {
6e3140c7 2674 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
cdded3d8 2675 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2676 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2677 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2bd0ea18 2678 pag->pagf_levels[XFS_BTNUM_BNOi] =
6e3140c7 2679 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2bd0ea18 2680 pag->pagf_levels[XFS_BTNUM_CNTi] =
6e3140c7 2681 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2682 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2683 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
88ce0792 2684 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
5e656dbb 2685 spin_lock_init(&pag->pagb_lock);
56b2de80 2686 pag->pagb_count = 0;
2687 /* XXX: pagb_tree doesn't exist in userspace */
2688 //pag->pagb_tree = RB_ROOT;
2bd0ea18 2689 pag->pagf_init = 1;
8dbee8f5 2690 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2691 }
2692#ifdef DEBUG
2693 else if (!XFS_FORCED_SHUTDOWN(mp)) {
6e3140c7 2694 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
cdded3d8 2695 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2696 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2697 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2bd0ea18 2698 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
6e3140c7 2699 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2bd0ea18 2700 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
6e3140c7 2701 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2702 }
2703#endif
56b2de80 2704 xfs_perag_put(pag);
2705 return 0;
2706}
2707
2708/*
2709 * Allocate an extent (variable-size).
2710 * Depending on the allocation type, we either look in a single allocation
2711 * group or loop over the allocation groups to find the result.
2712 */
2713int /* error */
2714xfs_alloc_vextent(
dfc130f3 2715 xfs_alloc_arg_t *args) /* allocation argument structure */
2bd0ea18 2716{
dfc130f3 2717 xfs_agblock_t agsize; /* allocation group size */
2718 int error;
2719 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2720 xfs_mount_t *mp; /* mount structure pointer */
2721 xfs_agnumber_t sagno; /* starting allocation group number */
dfc130f3 2722 xfs_alloctype_t type; /* input allocation type */
34317449 2723 int bump_rotor = 0;
46eca962 2724 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2725
2726 mp = args->mp;
2727 type = args->otype = args->type;
2728 args->agbno = NULLAGBLOCK;
2729 /*
2730 * Just fix this up, for the case where the last a.g. is shorter
2731 * (or there's only one a.g.) and the caller couldn't easily figure
2732 * that out (xfs_bmap_alloc).
2733 */
2734 agsize = mp->m_sb.sb_agblocks;
2735 if (args->maxlen > agsize)
2736 args->maxlen = agsize;
2737 if (args->alignment == 0)
2738 args->alignment = 1;
2739 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2740 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2741 ASSERT(args->minlen <= args->maxlen);
2742 ASSERT(args->minlen <= agsize);
2743 ASSERT(args->mod < args->prod);
2744 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2745 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2746 args->minlen > args->maxlen || args->minlen > agsize ||
2747 args->mod >= args->prod) {
2748 args->fsbno = NULLFSBLOCK;
56b2de80 2749 trace_xfs_alloc_vextent_badargs(args);
2750 return 0;
2751 }
9baa549b 2752
2753 switch (type) {
2754 case XFS_ALLOCTYPE_THIS_AG:
2755 case XFS_ALLOCTYPE_NEAR_BNO:
2756 case XFS_ALLOCTYPE_THIS_BNO:
2757 /*
2758 * These three force us into a single a.g.
2759 */
2760 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
56b2de80 2761 args->pag = xfs_perag_get(mp, args->agno);
2bd0ea18 2762 error = xfs_alloc_fix_freelist(args, 0);
2bd0ea18 2763 if (error) {
56b2de80 2764 trace_xfs_alloc_vextent_nofix(args);
2765 goto error0;
2766 }
2767 if (!args->agbp) {
56b2de80 2768 trace_xfs_alloc_vextent_noagbp(args);
2769 break;
2770 }
2771 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
0e266570 2772 if ((error = xfs_alloc_ag_vextent(args)))
2bd0ea18 2773 goto error0;
2774 break;
2775 case XFS_ALLOCTYPE_START_BNO:
2776 /*
2777 * Try near allocation first, then anywhere-in-ag after
2778 * the first a.g. fails.
2779 */
1fccd5c8 2780 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
34317449 2781 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2782 args->fsbno = XFS_AGB_TO_FSB(mp,
2783 ((mp->m_agfrotor / rotorstep) %
2784 mp->m_sb.sb_agcount), 0);
2785 bump_rotor = 1;
2786 }
2787 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2788 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2789 /* FALLTHROUGH */
2790 case XFS_ALLOCTYPE_FIRST_AG:
2791 /*
2792 * Rotate through the allocation groups looking for a winner.
2793 */
f3eda3a5 2794 if (type == XFS_ALLOCTYPE_FIRST_AG) {
2795 /*
2796 * Start with allocation group given by bno.
2797 */
2798 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2799 args->type = XFS_ALLOCTYPE_THIS_AG;
2800 sagno = 0;
2801 flags = 0;
2802 } else {
2803 /*
2804 * Start with the given allocation group.
2805 */
2806 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2807 flags = XFS_ALLOC_FLAG_TRYLOCK;
2808 }
2809 /*
2810 * Loop over allocation groups twice; first time with
2811 * trylock set, second time without.
2812 */
2813 for (;;) {
56b2de80 2814 args->pag = xfs_perag_get(mp, args->agno);
9baa549b 2815 error = xfs_alloc_fix_freelist(args, flags);
9baa549b 2816 if (error) {
56b2de80 2817 trace_xfs_alloc_vextent_nofix(args);
2818 goto error0;
2819 }
2820 /*
2821 * If we get a buffer back then the allocation will fly.
2822 */
2823 if (args->agbp) {
0e266570 2824 if ((error = xfs_alloc_ag_vextent(args)))
2bd0ea18 2825 goto error0;
2826 break;
2827 }
2828
2829 trace_xfs_alloc_vextent_loopfailed(args);
2830
2831 /*
2832 * Didn't work, figure out the next iteration.
2833 */
2834 if (args->agno == sagno &&
2835 type == XFS_ALLOCTYPE_START_BNO)
2836 args->type = XFS_ALLOCTYPE_THIS_AG;
2837 /*
2838 * For the first allocation, we can try any AG to get
2839 * space. However, if we already have allocated a
2840 * block, we don't want to try AGs whose number is below
2841 * sagno. Otherwise, we may end up with out-of-order
2842 * locking of AGF, which might cause deadlock.
2843 */
2844 if (++(args->agno) == mp->m_sb.sb_agcount) {
2845 if (args->firstblock != NULLFSBLOCK)
2846 args->agno = sagno;
2847 else
2848 args->agno = 0;
2849 }
5000d01d 2850 /*
2851 * Reached the starting a.g., must either be done
2852 * or switch to non-trylock mode.
2853 */
2854 if (args->agno == sagno) {
a3b4a951 2855 if (flags == 0) {
2bd0ea18 2856 args->agbno = NULLAGBLOCK;
56b2de80 2857 trace_xfs_alloc_vextent_allfailed(args);
2858 break;
2859 }
2860
2861 flags = 0;
2862 if (type == XFS_ALLOCTYPE_START_BNO) {
2863 args->agbno = XFS_FSB_TO_AGBNO(mp,
2864 args->fsbno);
2865 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2866 }
2867 }
56b2de80 2868 xfs_perag_put(args->pag);
2bd0ea18 2869 }
f3eda3a5 2870 if (bump_rotor) {
2871 if (args->agno == sagno)
2872 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2873 (mp->m_sb.sb_agcount * rotorstep);
2874 else
2875 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2876 (mp->m_sb.sb_agcount * rotorstep);
2877 }
2878 break;
2879 default:
2880 ASSERT(0);
2881 /* NOTREACHED */
2882 }
2883 if (args->agbno == NULLAGBLOCK)
2884 args->fsbno = NULLFSBLOCK;
2885 else {
2886 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2887#ifdef DEBUG
2888 ASSERT(args->len >= args->minlen);
2889 ASSERT(args->len <= args->maxlen);
2890 ASSERT(args->agbno % args->alignment == 0);
2891 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2892 args->len);
2893#endif
2894
2895 /* Zero the extent if we were asked to do so */
1fccd5c8 2896 if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2897 error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2898 if (error)
2899 goto error0;
2900 }
2901
2bd0ea18 2902 }
56b2de80 2903 xfs_perag_put(args->pag);
2904 return 0;
2905error0:
56b2de80 2906 xfs_perag_put(args->pag);
2907 return error;
2908}
2909
2910/* Ensure that the freelist is at full capacity. */
2911int
2912xfs_free_extent_fix_freelist(
2913 struct xfs_trans *tp,
2914 xfs_agnumber_t agno,
2915 struct xfs_buf **agbp)
2bd0ea18 2916{
2917 struct xfs_alloc_arg args;
2918 int error;
2bd0ea18 2919
2a6da3b8 2920 memset(&args, 0, sizeof(struct xfs_alloc_arg));
2921 args.tp = tp;
2922 args.mp = tp->t_mountp;
2a6da3b8 2923 args.agno = agno;
2924
2925 /*
2926 * validate that the block number is legal - this enables us to detect
2927 * and handle a silent filesystem corruption rather than crashing.
2928 */
a2ceac1f 2929 if (args.agno >= args.mp->m_sb.sb_agcount)
12b53197 2930 return -EFSCORRUPTED;
a2ceac1f 2931
56b2de80 2932 args.pag = xfs_perag_get(args.mp, args.agno);
2933 ASSERT(args.pag);
2934
2935 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2936 if (error)
2937 goto out;
2938
2939 *agbp = args.agbp;
2940out:
2941 xfs_perag_put(args.pag);
2942 return error;
2943}
2944
2945/*
2946 * Free an extent.
2947 * Just break up the extent address and hand off to xfs_free_ag_extent
2948 * after fixing up the freelist.
2949 */
2950int /* error */
2951xfs_free_extent(
2952 struct xfs_trans *tp, /* transaction pointer */
2953 xfs_fsblock_t bno, /* starting block number of extent */
85aec44f 2954 xfs_extlen_t len, /* length of extent */
2955 struct xfs_owner_info *oinfo, /* extent owner */
2956 enum xfs_ag_resv_type type) /* block reservation type */
2957{
2958 struct xfs_mount *mp = tp->t_mountp;
2959 struct xfs_buf *agbp;
2960 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
2961 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
2962 int error;
2963
2964 ASSERT(len != 0);
9760cac2 2965 ASSERT(type != XFS_AG_RESV_AGFL);
2a6da3b8 2966
a9da40de 2967 if (XFS_TEST_ERROR(false, mp,
e2a190dd 2968 XFS_ERRTAG_FREE_EXTENT))
2969 return -EIO;
2970
2971 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
2972 if (error)
2973 return error;
2974
2975 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
2976
2977 /* validate that the extent size is legal now that we have the agf locked */
2978 XFS_WANT_CORRUPTED_GOTO(mp,
2979 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
2980 err);
a2ceac1f 2981
cf8ce220 2982 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
2983 if (error)
2984 goto err;
2985
2986 xfs_extent_busy_insert(tp, agno, agbno, len, 0);
2987 return 0;
2988
2989err:
2990 xfs_trans_brelse(tp, agbp);
2991 return error;
2992}
2993
2994struct xfs_alloc_query_range_info {
2995 xfs_alloc_query_range_fn fn;
2996 void *priv;
2997};
2998
2999/* Format btree record and pass to our callback. */
3000STATIC int
3001xfs_alloc_query_range_helper(
3002 struct xfs_btree_cur *cur,
3003 union xfs_btree_rec *rec,
3004 void *priv)
3005{
3006 struct xfs_alloc_query_range_info *query = priv;
3007 struct xfs_alloc_rec_incore irec;
3008
3009 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3010 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3011 return query->fn(cur, &irec, query->priv);
3012}
3013
3014/* Find all free space within a given range of blocks. */
3015int
3016xfs_alloc_query_range(
3017 struct xfs_btree_cur *cur,
3018 struct xfs_alloc_rec_incore *low_rec,
3019 struct xfs_alloc_rec_incore *high_rec,
3020 xfs_alloc_query_range_fn fn,
3021 void *priv)
3022{
3023 union xfs_btree_irec low_brec;
3024 union xfs_btree_irec high_brec;
3025 struct xfs_alloc_query_range_info query;
3026
3027 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3028 low_brec.a = *low_rec;
3029 high_brec.a = *high_rec;
3030 query.priv = priv;
3031 query.fn = fn;
3032 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3033 xfs_alloc_query_range_helper, &query);
3034}
3035
3036/* Find all free space records. */
3037int
3038xfs_alloc_query_all(
3039 struct xfs_btree_cur *cur,
3040 xfs_alloc_query_range_fn fn,
3041 void *priv)
3042{
3043 struct xfs_alloc_query_range_info query;
3044
3045 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3046 query.priv = priv;
3047 query.fn = fn;
3048 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3049}
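
/*
 * Usage sketch (editor's addition, not part of this file): one possible way
 * a caller could walk every bnobt record in an AG with xfs_alloc_query_all().
 * The helper and wrapper names below are hypothetical; the AGF is assumed to
 * have been read and locked via the usual xfs_alloc_read_agf() path before
 * the cursor is built.
 */
#if 0
STATIC int
xfs_alloc_count_helper(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	unsigned long long		*nr = priv;

	(*nr)++;			/* one callback per free extent */
	return 0;
}

/* Count the free space records of one AG; agbp holds the locked AGF. */
STATIC int
xfs_alloc_count_free_extents(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	unsigned long long	*nr)
{
	struct xfs_btree_cur	*cur;
	int			error;

	*nr = 0;
	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	error = xfs_alloc_query_all(cur, xfs_alloc_count_helper, nr);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}
#endif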
3050
3051/* Find the size of the AG, in blocks. */
3052xfs_agblock_t
3053xfs_ag_block_count(
3054 struct xfs_mount *mp,
3055 xfs_agnumber_t agno)
3056{
3057 ASSERT(agno < mp->m_sb.sb_agcount);
3058
3059 if (agno < mp->m_sb.sb_agcount - 1)
3060 return mp->m_sb.sb_agblocks;
3061 return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
3062}
3063
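/*
 * Worked example (editor's note, not from the original source): with
 * sb_agblocks = 16384 and sb_dblocks = 50000 (so sb_agcount = 4), AGs 0-2
 * report 16384 blocks each, while the short last AG reports
 * 50000 - 3 * 16384 = 848 blocks.
 */
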
3064/*
3065 * Verify that an AG block number pointer neither points outside the AG
3066 * nor points at static metadata.
3067 */
3068bool
3069xfs_verify_agbno(
3070 struct xfs_mount *mp,
3071 xfs_agnumber_t agno,
3072 xfs_agblock_t agbno)
3073{
3074 xfs_agblock_t eoag;
3075
3076 eoag = xfs_ag_block_count(mp, agno);
3077 if (agbno >= eoag)
3078 return false;
3079 if (agbno <= XFS_AGFL_BLOCK(mp))
3080 return false;
3081 return true;
3082}
3083
3084/*
3085 * Verify that an FS block number pointer neither points outside the
3086 * filesystem nor points at static AG metadata.
3087 */
3088bool
3089xfs_verify_fsbno(
3090 struct xfs_mount *mp,
3091 xfs_fsblock_t fsbno)
3092{
3093 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
3094
3095 if (agno >= mp->m_sb.sb_agcount)
3096 return false;
3097 return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
3098}
3099
3100/* Is there a record covering a given extent? */
3101int
3102xfs_alloc_has_record(
3103 struct xfs_btree_cur *cur,
3104 xfs_agblock_t bno,
3105 xfs_extlen_t len,
3106 bool *exists)
3107{
3108 union xfs_btree_irec low;
3109 union xfs_btree_irec high;
3110
3111 memset(&low, 0, sizeof(low));
3112 low.a.ar_startblock = bno;
3113 memset(&high, 0xFF, sizeof(high));
3114 high.a.ar_startblock = bno + len - 1;
3115
3116 return xfs_btree_has_record(cur, &low, &high, exists);
3117}
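
/*
 * Usage sketch (editor's addition, not part of this file): given an
 * initialised bnobt cursor "bno_cur", a caller could ask whether any free
 * space record overlaps the 8 blocks starting at agbno 100 with:
 *
 *	error = xfs_alloc_has_record(bno_cur, 100, 8, &is_free);
 *
 * where a true "is_free" on success means at least part of that range is
 * recorded as free space.
 */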