/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>

STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

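/*
 * Store the new btree root block number in the AGF and, on level changes,
 * keep the cached per-AG level in step with the on-disk value.
 */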
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

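	/*
	 * Note: the length argument from the generic btree code is unused
	 * here; freelist allocations are always a single block.
	 */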
	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	/*
	 * Since blocks move to the free list without the coordination used
	 * in xfs_bmap_finish, we can't allow the block to be available for
	 * reallocation or non-transaction writing (user data) until we know
	 * that the transaction that moved it to the free list is permanently
	 * on disk.  We track such blocks by marking them "busy"; the busy
	 * list is maintained on a per-ag basis and each transaction records
	 * which entries should be removed when the iclog commits to disk.
	 * If a busy block is reallocated, the iclog is pushed up to the LSN
	 * that freed the block.
	 */
	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF.  Only the by-size (cnt) btree
 * uses this: its rightmost record is, by construction, the longest free
 * extent in the AG.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

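/*
 * m_alloc_mnr[] and m_alloc_mxr[] each hold two limits: index 0 applies
 * to leaf blocks, index 1 to node blocks, hence the "level != 0" index.
 */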
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_allocbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->alloc.ar_startblock != 0);

	rec->alloc.ar_startblock = key->alloc.ar_startblock;
	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	ASSERT(cur->bc_rec.a.ar_startblock != 0);

	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

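/*
 * The root of each allocation btree is recorded in the AGF, indexed by
 * btree type (by-bno or by-size).
 */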
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

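/*
 * Compare the cursor's in-core record with an on-disk key.  The by-bno
 * tree orders on startblock alone; the by-size tree orders on blockcount
 * first and breaks ties with startblock.
 */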
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int
xfs_allocbt_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	union xfs_btree_ptr	*newroot)
{
	int			error;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1, and then
	 * free the old root.
	 */
	xfs_allocbt_set_root(cur, newroot, -1);
	error = xfs_allocbt_free_block(cur, bp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	XFS_BTREE_STATS_INC(cur, free);

	xfs_btree_setbuf(cur, level, NULL);
	cur->bc_nlevels--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}

#ifdef DEBUG
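/*
 * Ordering checks used by the generic btree code in debug builds: the
 * by-bno tree must hold non-overlapping extents in startblock order,
 * the by-size tree must be sorted by (blockcount, startblock).
 */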
STATIC int
xfs_allocbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(k1->alloc.ar_startblock) <
		       be32_to_cpu(k2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(k1->alloc.ar_blockcount) <
			be32_to_cpu(k2->alloc.ar_blockcount) ||
			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
			 be32_to_cpu(k1->alloc.ar_startblock) <
			 be32_to_cpu(k2->alloc.ar_startblock));
	}
}

STATIC int
xfs_allocbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(r1->alloc.ar_startblock) +
			be32_to_cpu(r1->alloc.ar_blockcount) <=
			be32_to_cpu(r2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(r1->alloc.ar_blockcount) <
			be32_to_cpu(r2->alloc.ar_blockcount) ||
			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
			 be32_to_cpu(r1->alloc.ar_startblock) <
			 be32_to_cpu(r2->alloc.ar_startblock));
	}
}
#endif	/* DEBUG */

#ifdef XFS_BTREE_TRACE
ktrace_t	*xfs_allocbt_trace_buf;

STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		(void *)func, (void *)s, NULL, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}

STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif	/* XFS_BTREE_TRACE */

static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.kill_root		= xfs_allocbt_kill_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,

#ifdef DEBUG
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_allocbt_trace_enter,
	.trace_cursor		= xfs_allocbt_trace_cursor,
	.trace_key		= xfs_allocbt_trace_key,
	.trace_record		= xfs_allocbt_trace_record,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_allocbt_ops;
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

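/*
 * A minimal usage sketch (hypothetical caller, error handling elided,
 * not a call sequence taken from this file): read the AGF for the target
 * AG first, then build a cursor for one of the two trees, e.g.:
 *
 *	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 *	if (!error)
 *		cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno,
 *					      XFS_BTNUM_CNT);
 */
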
/*
 * Calculate the number of records that fit in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

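	/* Leaf blocks hold records; node blocks hold key/pointer pairs. */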
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}