1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4 * Copyright (C) 2010 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7 #include "libxfs_priv.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_da_format.h"
15 #include "xfs_da_btree.h"
16 #include "xfs_inode.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_trans.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_quota_defs.h"
21 #include "xfs_rtbitmap.h"
22
23 #define _ALLOC true
24 #define _FREE false
25
26 /*
27 * A buffer has a format structure overhead in the log in addition
28 * to the data, so we need to take this into account when reserving
29 * space in a transaction for a buffer. Round the space required up
30 * to a multiple of 128 bytes so that we don't change the historical
31 * reservation that has been used for this overhead.
32 */
33 STATIC uint
34 xfs_buf_log_overhead(void)
35 {
36 return round_up(sizeof(struct xlog_op_header) +
37 sizeof(struct xfs_buf_log_format), 128);
38 }
39
40 /*
41  * Calculate the transaction log reservation per item in bytes.
42  *
43  * The nbufs argument indicates the number of items that will be
44  * changed in a transaction. size is the number of bytes to reserve
45  * per item.
46 */
47 STATIC uint
48 xfs_calc_buf_res(
49 uint nbufs,
50 uint size)
51 {
52 return nbufs * (size + xfs_buf_log_overhead());
53 }
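/*
 * Worked example with assumed values (not from the original source): if the
 * rounded-up per-buffer overhead above comes to 128 bytes, then reserving
 * three 4096-byte buffers costs 3 * (4096 + 128) = 12672 bytes of log space.
 */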
54
55 /*
56 * Per-extent log reservation for the btree changes involved in freeing or
57  * allocating an extent. In classic XFS there are two trees that will be
58  * modified (bnobt + cntbt). With rmap enabled, there is a third tree
59  * (the rmapbt). The number of blocks reserved is based on the formula:
60 *
61 * num trees * ((2 blocks/level * max depth) - 1)
62 *
63 * Keep in mind that max depth is calculated separately for each type of tree.
64 */
65 uint
66 xfs_allocfree_block_count(
67 struct xfs_mount *mp,
68 uint num_ops)
69 {
70 uint blocks;
71
72 blocks = num_ops * 2 * (2 * mp->m_alloc_maxlevels - 1);
73 if (xfs_has_rmapbt(mp))
74 blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
75
76 return blocks;
77 }
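/*
 * Worked example with assumed values (not from the original source): with
 * m_alloc_maxlevels = 5 and the rmapbt disabled, a single op reserves
 * 1 * 2 * (2 * 5 - 1) = 18 blocks; enabling the rmapbt with
 * m_rmap_maxlevels = 5 would add another 1 * (2 * 5 - 1) = 9 blocks.
 */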
78
79 /*
80 * Per-extent log reservation for refcount btree changes. These are never done
81 * in the same transaction as an allocation or a free, so we compute them
82 * separately.
83 */
84 static unsigned int
85 xfs_refcountbt_block_count(
86 struct xfs_mount *mp,
87 unsigned int num_ops)
88 {
89 return num_ops * (2 * mp->m_refc_maxlevels - 1);
90 }
91
92 /*
93 * Logging inodes is really tricksy. They are logged in memory format,
94 * which means that what we write into the log doesn't directly translate into
95 * the amount of space they use on disk.
96 *
97 * Case in point - btree format forks in memory format use more space than the
98 * on-disk format. In memory, the buffer contains a normal btree block header so
99 * the btree code can treat it as though it is just another generic buffer.
100 * However, when we write it to the inode fork, we don't write all of this
101 * header as it isn't needed. e.g. the root is only ever in the inode, so
102 * there's no need for sibling pointers which would waste 16 bytes of space.
103 *
104  * Hence when we have an inode with a maximally sized btree format fork, the
105 * amount of information we actually log is greater than the size of the inode
106 * on disk. Hence we need an inode reservation function that calculates all this
107 * correctly. So, we log:
108 *
109 * - 4 log op headers for object
110 * - for the ilf, the inode core and 2 forks
111 * - inode log format object
112 * - the inode core
113 * - two inode forks containing bmap btree root blocks.
114 * - the btree data contained by both forks will fit into the inode size,
115 * hence when combined with the inode core above, we have a total of the
116 * actual inode size.
117 * - the BMBT headers need to be accounted separately, as they are
118 * additional to the records and pointers that fit inside the inode
119 * forks.
120 */
121 STATIC uint
122 xfs_calc_inode_res(
123 struct xfs_mount *mp,
124 uint ninodes)
125 {
126 return ninodes *
127 (4 * sizeof(struct xlog_op_header) +
128 sizeof(struct xfs_inode_log_format) +
129 mp->m_sb.sb_inodesize +
130 2 * XFS_BMBT_BLOCK_LEN(mp));
131 }
132
133 /*
134 * Inode btree record insertion/removal modifies the inode btree and free space
135 * btrees (since the inobt does not use the agfl). This requires the following
136 * reservation:
137 *
138 * the inode btree: max depth * blocksize
139 * the allocation btrees: 2 trees * (max depth - 1) * block size
140 *
141 * The caller must account for SB and AG header modifications, etc.
142 */
143 STATIC uint
144 xfs_calc_inobt_res(
145 struct xfs_mount *mp)
146 {
147 return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
148 XFS_FSB_TO_B(mp, 1)) +
149 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
150 XFS_FSB_TO_B(mp, 1));
151 }
152
153 /*
154 * The free inode btree is a conditional feature. The behavior differs slightly
155 * from that of the traditional inode btree in that the finobt tracks records
156 * for inode chunks with at least one free inode. A record can be removed from
157 * the tree during individual inode allocation. Therefore the finobt
158 * reservation is unconditional for both the inode chunk allocation and
159 * individual inode allocation (modify) cases.
160 *
161 * Behavior aside, the reservation for finobt modification is equivalent to the
162 * traditional inobt: cover a full finobt shape change plus block allocation.
163 */
164 STATIC uint
165 xfs_calc_finobt_res(
166 struct xfs_mount *mp)
167 {
168 if (!xfs_has_finobt(mp))
169 return 0;
170
171 return xfs_calc_inobt_res(mp);
172 }
173
174 /*
175 * Calculate the reservation required to allocate or free an inode chunk. This
176 * includes:
177 *
178 * the allocation btrees: 2 trees * (max depth - 1) * block size
179 * the inode chunk: m_ino_geo.ialloc_blks * N
180 *
181 * The size N of the inode chunk reservation depends on whether it is for
182 * allocation or free and which type of create transaction is in use. An inode
183 * chunk free always invalidates the buffers and only requires reservation for
184 * headers (N == 0). An inode chunk allocation requires a chunk sized
185 * reservation on v4 and older superblocks to initialize the chunk. No chunk
186 * reservation is required for allocation on v5 supers, which use ordered
187 * buffers to initialize.
188 */
189 STATIC uint
190 xfs_calc_inode_chunk_res(
191 struct xfs_mount *mp,
192 bool alloc)
193 {
194 uint res, size = 0;
195
196 res = xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
197 XFS_FSB_TO_B(mp, 1));
198 if (alloc) {
199 /* icreate tx uses ordered buffers */
200 if (xfs_has_v3inodes(mp))
201 return res;
202 size = XFS_FSB_TO_B(mp, 1);
203 }
204
205 res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
206 return res;
207 }
208
209 /*
210 * Per-extent log reservation for the btree changes involved in freeing or
211 * allocating a realtime extent. We have to be able to log as many rtbitmap
212  * blocks as needed to mark inuse XFS_MAX_BMBT_EXTLEN blocks' worth of realtime
213 * extents, as well as the realtime summary block.
214 */
215 static unsigned int
216 xfs_rtalloc_block_count(
217 struct xfs_mount *mp,
218 unsigned int num_ops)
219 {
220 unsigned int rtbmp_blocks;
221 xfs_rtxlen_t rtxlen;
222
223 rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
224 rtbmp_blocks = xfs_rtbitmap_blockcount(mp, rtxlen);
225 return (rtbmp_blocks + 1) * num_ops;
226 }
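/*
 * Rough sketch with assumed geometry (not from the original source): with
 * 4096-byte blocks and one block per realtime extent, XFS_MAX_BMBT_EXTLEN
 * (on the order of 2^21 blocks) maps to about 2^21 realtime extents, i.e.
 * about 2^21 bits of bitmap, which is roughly 64 bitmap blocks; each op then
 * reserves about 65 blocks including the summary block.
 */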
227
228 /*
229 * Various log reservation values.
230 *
231 * These are based on the size of the file system block because that is what
232 * most transactions manipulate. Each adds in an additional 128 bytes per
233 * item logged to try to account for the overhead of the transaction mechanism.
234 *
235 * Note: Most of the reservations underestimate the number of allocation
236 * groups into which they could free extents in the xfs_defer_finish() call.
237 * This is because the number in the worst case is quite high and quite
238 * unusual. In order to fix this we need to change xfs_defer_finish() to free
239 * extents in only a single AG at a time. This will require changes to the
240 * EFI code as well, however, so that the EFI for the extents not freed is
241 * logged again in each transaction. See SGI PV #261917.
242 *
243 * Reservation functions here avoid a huge stack in xfs_trans_init due to
244 * register overflow from temporaries in the calculations.
245 */
246
247 /*
248 * Compute the log reservation required to handle the refcount update
249 * transaction. Refcount updates are always done via deferred log items.
250 *
251 * This is calculated as:
252 * Data device refcount updates (t1):
253 * the agfs of the ags containing the blocks: nr_ops * sector size
254 * the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
255 */
256 static unsigned int
257 xfs_calc_refcountbt_reservation(
258 struct xfs_mount *mp,
259 unsigned int nr_ops)
260 {
261 unsigned int blksz = XFS_FSB_TO_B(mp, 1);
262
263 if (!xfs_has_reflink(mp))
264 return 0;
265
266 return xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) +
267 xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz);
268 }
269
270 /*
271 * In a write transaction we can allocate a maximum of 2
272 * extents. This gives (t1):
273 * the inode getting the new extents: inode size
274 * the inode's bmap btree: max depth * block size
275  *    the agfs of the ags from which the extents are allocated: 2 * sector size
276 * the superblock free block counter: sector size
277 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
278 * Or, if we're writing to a realtime file (t2):
279 * the inode getting the new extents: inode size
280 * the inode's bmap btree: max depth * block size
281  *    the agfs of the ags from which the extents are allocated: 2 * sector size
282  *    the superblock free block counter: sector size
283  *    the realtime bitmap: ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
284 * the realtime summary: 1 block
285 * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
286 * And the bmap_finish transaction can free bmap blocks in a join (t3):
287 * the agfs of the ags containing the blocks: 2 * sector size
288 * the agfls of the ags containing the blocks: 2 * sector size
289 * the super block free block counter: sector size
290 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
291 * And any refcount updates that happen in a separate transaction (t4).
292 */
293 STATIC uint
294 xfs_calc_write_reservation(
295 struct xfs_mount *mp,
296 bool for_minlogsize)
297 {
298 unsigned int t1, t2, t3, t4;
299 unsigned int blksz = XFS_FSB_TO_B(mp, 1);
300
301 t1 = xfs_calc_inode_res(mp, 1) +
302 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
303 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
304 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
305
306 if (xfs_has_realtime(mp)) {
307 t2 = xfs_calc_inode_res(mp, 1) +
308 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
309 blksz) +
310 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
311 xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 1), blksz) +
312 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
313 } else {
314 t2 = 0;
315 }
316
317 t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
318 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
319
320 /*
321 * In the early days of reflink, we included enough reservation to log
322 * two refcountbt splits for each transaction. The codebase runs
323 * refcountbt updates in separate transactions now, so to compute the
324  * minimum log size, add the refcount btree splits back to t1 and t3 and
325 * do not account them separately as t4. Reflink did not support
326 * realtime when the reservations were established, so no adjustment to
327 * t2 is needed.
328 */
329 if (for_minlogsize) {
330 unsigned int adj = 0;
331
332 if (xfs_has_reflink(mp))
333 adj = xfs_calc_buf_res(
334 xfs_refcountbt_block_count(mp, 2),
335 blksz);
336 t1 += adj;
337 t3 += adj;
338 return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
339 }
340
341 t4 = xfs_calc_refcountbt_reservation(mp, 1);
342 return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
343 }
344
345 unsigned int
346 xfs_calc_write_reservation_minlogsize(
347 struct xfs_mount *mp)
348 {
349 return xfs_calc_write_reservation(mp, true);
350 }
351
352 /*
353 * In truncating a file we free up to two extents at once. We can modify (t1):
354 * the inode being truncated: inode size
355 * the inode's bmap btree: (max depth + 1) * block size
356 * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
357 * the agf for each of the ags: 4 * sector size
358 * the agfl for each of the ags: 4 * sector size
359 * the super block to reflect the freed blocks: sector size
360 * worst case split in allocation btrees per extent assuming 4 extents:
361 * 4 exts * 2 trees * (2 * max depth - 1) * block size
362 * Or, if it's a realtime file (t3):
363 * the agf for each of the ags: 2 * sector size
364 * the agfl for each of the ags: 2 * sector size
365 * the super block to reflect the freed blocks: sector size
366 * the realtime bitmap:
367  *     2 exts * ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
368 * the realtime summary: 2 exts * 1 block
369 * worst case split in allocation btrees per extent assuming 2 extents:
370 * 2 exts * 2 trees * (2 * max depth - 1) * block size
371 * And any refcount updates that happen in a separate transaction (t4).
372 */
373 STATIC uint
374 xfs_calc_itruncate_reservation(
375 struct xfs_mount *mp,
376 bool for_minlogsize)
377 {
378 unsigned int t1, t2, t3, t4;
379 unsigned int blksz = XFS_FSB_TO_B(mp, 1);
380
381 t1 = xfs_calc_inode_res(mp, 1) +
382 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
383
384 t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
385 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4), blksz);
386
387 if (xfs_has_realtime(mp)) {
388 t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
389 xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 2), blksz) +
390 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
391 } else {
392 t3 = 0;
393 }
394
395 /*
396 * In the early days of reflink, we included enough reservation to log
397 * four refcountbt splits in the same transaction as bnobt/cntbt
398 * updates. The codebase runs refcountbt updates in separate
399 * transactions now, so to compute the minimum log size, add the
400 * refcount btree splits back here and do not compute them separately
401 * as t4. Reflink did not support realtime when the reservations were
402 * established, so do not adjust t3.
403 */
404 if (for_minlogsize) {
405 if (xfs_has_reflink(mp))
406 t2 += xfs_calc_buf_res(
407 xfs_refcountbt_block_count(mp, 4),
408 blksz);
409
410 return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
411 }
412
413 t4 = xfs_calc_refcountbt_reservation(mp, 2);
414 return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
415 }
416
417 unsigned int
418 xfs_calc_itruncate_reservation_minlogsize(
419 struct xfs_mount *mp)
420 {
421 return xfs_calc_itruncate_reservation(mp, true);
422 }
423
424 /*
425  * In renaming files we can modify:
426 * the five inodes involved: 5 * inode size
427 * the two directory btrees: 2 * (max depth + v2) * dir block size
428 * the two directory bmap btrees: 2 * max depth * block size
429 * And the bmap_finish transaction can free dir and bmap blocks (two sets
430 * of bmap blocks) giving:
431 * the agf for the ags in which the blocks live: 3 * sector size
432 * the agfl for the ags in which the blocks live: 3 * sector size
433 * the superblock for the free block count: sector size
434 * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
435 */
436 STATIC uint
437 xfs_calc_rename_reservation(
438 struct xfs_mount *mp)
439 {
440 return XFS_DQUOT_LOGRES(mp) +
441 max((xfs_calc_inode_res(mp, 5) +
442 xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
443 XFS_FSB_TO_B(mp, 1))),
444 (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
445 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3),
446 XFS_FSB_TO_B(mp, 1))));
447 }
448
449 /*
450  * For removing an inode from the unlinked list, we can modify:
451 * the agi hash list and counters: sector size
452 * the on disk inode before ours in the agi hash list: inode cluster size
453 * the on disk inode in the agi hash list: inode cluster size
454 */
455 STATIC uint
456 xfs_calc_iunlink_remove_reservation(
457 struct xfs_mount *mp)
458 {
459 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
460 2 * M_IGEO(mp)->inode_cluster_size;
461 }
462
463 /*
464 * For creating a link to an inode:
465 * the parent directory inode: inode size
466 * the linked inode: inode size
467 * the directory btree could split: (max depth + v2) * dir block size
468 * the directory bmap btree could join or split: (max depth + v2) * blocksize
469 * And the bmap_finish transaction can free some bmap blocks giving:
470 * the agf for the ag in which the blocks live: sector size
471 * the agfl for the ag in which the blocks live: sector size
472 * the superblock for the free block count: sector size
473 * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
474 */
475 STATIC uint
476 xfs_calc_link_reservation(
477 struct xfs_mount *mp)
478 {
479 return XFS_DQUOT_LOGRES(mp) +
480 xfs_calc_iunlink_remove_reservation(mp) +
481 max((xfs_calc_inode_res(mp, 2) +
482 xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
483 XFS_FSB_TO_B(mp, 1))),
484 (xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
485 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
486 XFS_FSB_TO_B(mp, 1))));
487 }
488
489 /*
490  * For adding an inode to the unlinked list we can modify:
491 * the agi hash list: sector size
492 * the on disk inode: inode cluster size
493 */
494 STATIC uint
495 xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
496 {
497 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
498 M_IGEO(mp)->inode_cluster_size;
499 }
500
501 /*
502 * For removing a directory entry we can modify:
503 * the parent directory inode: inode size
504 * the removed inode: inode size
505 * the directory btree could join: (max depth + v2) * dir block size
506 * the directory bmap btree could join or split: (max depth + v2) * blocksize
507 * And the bmap_finish transaction can free the dir and bmap blocks giving:
508 * the agf for the ag in which the blocks live: 2 * sector size
509 * the agfl for the ag in which the blocks live: 2 * sector size
510 * the superblock for the free block count: sector size
511 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
512 */
513 STATIC uint
514 xfs_calc_remove_reservation(
515 struct xfs_mount *mp)
516 {
517 return XFS_DQUOT_LOGRES(mp) +
518 xfs_calc_iunlink_add_reservation(mp) +
519 max((xfs_calc_inode_res(mp, 2) +
520 xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
521 XFS_FSB_TO_B(mp, 1))),
522 (xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
523 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
524 XFS_FSB_TO_B(mp, 1))));
525 }
526
527 /*
528 * For create, break it in to the two cases that the transaction
529 * covers. We start with the modify case - allocation done by modification
530 * of the state of existing inodes - and the allocation case.
531 */
532
533 /*
534 * For create we can modify:
535 * the parent directory inode: inode size
536 * the new inode: inode size
537 * the inode btree entry: block size
538 * the superblock for the nlink flag: sector size
539 * the directory btree: (max depth + v2) * dir block size
540 * the directory inode's bmap btree: (max depth + v2) * block size
541 * the finobt (record modification and allocation btrees)
542 */
543 STATIC uint
544 xfs_calc_create_resv_modify(
545 struct xfs_mount *mp)
546 {
547 return xfs_calc_inode_res(mp, 2) +
548 xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
549 (uint)XFS_FSB_TO_B(mp, 1) +
550 xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
551 xfs_calc_finobt_res(mp);
552 }
553
554 /*
555 * For icreate we can allocate some inodes giving:
556 * the agi and agf of the ag getting the new inodes: 2 * sectorsize
557 * the superblock for the nlink flag: sector size
558 * the inode chunk (allocation, optional init)
559 * the inobt (record insertion)
560 * the finobt (optional, record insertion)
561 */
562 STATIC uint
563 xfs_calc_icreate_resv_alloc(
564 struct xfs_mount *mp)
565 {
566 return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
567 mp->m_sb.sb_sectsize +
568 xfs_calc_inode_chunk_res(mp, _ALLOC) +
569 xfs_calc_inobt_res(mp) +
570 xfs_calc_finobt_res(mp);
571 }
572
573 STATIC uint
574 xfs_calc_icreate_reservation(xfs_mount_t *mp)
575 {
576 return XFS_DQUOT_LOGRES(mp) +
577 max(xfs_calc_icreate_resv_alloc(mp),
578 xfs_calc_create_resv_modify(mp));
579 }
580
581 STATIC uint
582 xfs_calc_create_tmpfile_reservation(
583 struct xfs_mount *mp)
584 {
585 uint res = XFS_DQUOT_LOGRES(mp);
586
587 res += xfs_calc_icreate_resv_alloc(mp);
588 return res + xfs_calc_iunlink_add_reservation(mp);
589 }
590
591 /*
592 * Making a new directory is the same as creating a new file.
593 */
594 STATIC uint
595 xfs_calc_mkdir_reservation(
596 struct xfs_mount *mp)
597 {
598 return xfs_calc_icreate_reservation(mp);
599 }
600
601
602 /*
603  * Making a new symlink is the same as creating a new file, but
604 * with the added blocks for remote symlink data which can be up to 1kB in
605 * length (XFS_SYMLINK_MAXLEN).
606 */
607 STATIC uint
608 xfs_calc_symlink_reservation(
609 struct xfs_mount *mp)
610 {
611 return xfs_calc_icreate_reservation(mp) +
612 xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
613 }
614
615 /*
616 * In freeing an inode we can modify:
617 * the inode being freed: inode size
618 * the super block free inode counter, AGF and AGFL: sector size
619 * the on disk inode (agi unlinked list removal)
620 * the inode chunk (invalidated, headers only)
621 * the inode btree
622 * the finobt (record insertion, removal or modification)
623 *
624  * Note that the inode chunk reservation includes an allocfree reservation for
625  * freeing the inode chunk. This is technically extraneous because the inode
626  * chunk free is deferred (it occurs after a transaction roll). Include the
627  * extra reservation anyway, since we've had reports of ifree transaction
628  * overruns due to too many agfl fixups during inode chunk frees.
629 */
630 STATIC uint
631 xfs_calc_ifree_reservation(
632 struct xfs_mount *mp)
633 {
634 return XFS_DQUOT_LOGRES(mp) +
635 xfs_calc_inode_res(mp, 1) +
636 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
637 xfs_calc_iunlink_remove_reservation(mp) +
638 xfs_calc_inode_chunk_res(mp, _FREE) +
639 xfs_calc_inobt_res(mp) +
640 xfs_calc_finobt_res(mp);
641 }
642
643 /*
644  * When only changing the inode we log the inode and possibly the superblock.
645  * We also add a bit of slop for the transaction stuff.
646 */
647 STATIC uint
648 xfs_calc_ichange_reservation(
649 struct xfs_mount *mp)
650 {
651 return XFS_DQUOT_LOGRES(mp) +
652 xfs_calc_inode_res(mp, 1) +
653 xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
654
655 }
656
657 /*
658 * Growing the data section of the filesystem.
659 * superblock
660 * agi and agf
661 * allocation btrees
662 */
663 STATIC uint
664 xfs_calc_growdata_reservation(
665 struct xfs_mount *mp)
666 {
667 return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
668 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
669 XFS_FSB_TO_B(mp, 1));
670 }
671
672 /*
673 * Growing the rt section of the filesystem.
674 * In the first set of transactions (ALLOC) we allocate space to the
675 * bitmap or summary files.
676 * superblock: sector size
677 * agf of the ag from which the extent is allocated: sector size
678 * bmap btree for bitmap/summary inode: max depth * blocksize
679 * bitmap/summary inode: inode size
680 * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
681 */
682 STATIC uint
683 xfs_calc_growrtalloc_reservation(
684 struct xfs_mount *mp)
685 {
686 return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
687 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
688 XFS_FSB_TO_B(mp, 1)) +
689 xfs_calc_inode_res(mp, 1) +
690 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
691 XFS_FSB_TO_B(mp, 1));
692 }
693
694 /*
695 * Growing the rt section of the filesystem.
696 * In the second set of transactions (ZERO) we zero the new metadata blocks.
697 * one bitmap/summary block: blocksize
698 */
699 STATIC uint
700 xfs_calc_growrtzero_reservation(
701 struct xfs_mount *mp)
702 {
703 return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
704 }
705
706 /*
707 * Growing the rt section of the filesystem.
708 * In the third set of transactions (FREE) we update metadata without
709 * allocating any new blocks.
710 * superblock: sector size
711 * bitmap inode: inode size
712 * summary inode: inode size
713 * one bitmap block: blocksize
714 * summary blocks: new summary size
715 */
716 STATIC uint
717 xfs_calc_growrtfree_reservation(
718 struct xfs_mount *mp)
719 {
720 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
721 xfs_calc_inode_res(mp, 2) +
722 xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
723 xfs_calc_buf_res(1, mp->m_rsumsize);
724 }
725
726 /*
727 * Logging the inode modification timestamp on a synchronous write.
728 * inode
729 */
730 STATIC uint
731 xfs_calc_swrite_reservation(
732 struct xfs_mount *mp)
733 {
734 return xfs_calc_inode_res(mp, 1);
735 }
736
737 /*
738 * Logging the inode mode bits when writing a setuid/setgid file
739 * inode
740 */
741 STATIC uint
742 xfs_calc_writeid_reservation(
743 struct xfs_mount *mp)
744 {
745 return xfs_calc_inode_res(mp, 1);
746 }
747
748 /*
749 * Converting the inode from non-attributed to attributed.
750 * the inode being converted: inode size
751 * agf block and superblock (for block allocation)
752 * the new block (directory sized)
753 * bmap blocks for the new directory block
754 * allocation btrees
755 */
756 STATIC uint
757 xfs_calc_addafork_reservation(
758 struct xfs_mount *mp)
759 {
760 return XFS_DQUOT_LOGRES(mp) +
761 xfs_calc_inode_res(mp, 1) +
762 xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
763 xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
764 xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
765 XFS_FSB_TO_B(mp, 1)) +
766 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
767 XFS_FSB_TO_B(mp, 1));
768 }
769
770 /*
771 * Removing the attribute fork of a file
772 * the inode being truncated: inode size
773 * the inode's bmap btree: max depth * block size
774 * And the bmap_finish transaction can free the blocks and bmap blocks:
775 * the agf for each of the ags: 4 * sector size
776 * the agfl for each of the ags: 4 * sector size
777 * the super block to reflect the freed blocks: sector size
778 * worst case split in allocation btrees per extent assuming 4 extents:
779 * 4 exts * 2 trees * (2 * max depth - 1) * block size
780 */
781 STATIC uint
782 xfs_calc_attrinval_reservation(
783 struct xfs_mount *mp)
784 {
785 return max((xfs_calc_inode_res(mp, 1) +
786 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
787 XFS_FSB_TO_B(mp, 1))),
788 (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
789 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4),
790 XFS_FSB_TO_B(mp, 1))));
791 }
792
793 /*
794 * Setting an attribute at mount time.
795 * the inode getting the attribute
796 * the superblock for allocations
797 * the agfs extents are allocated from
798 * the attribute btree * max depth
799 * the inode allocation btree
800 * Since attribute transaction space is dependent on the size of the attribute,
801  * the calculation is done partially at mount time and partially at runtime (see
802 * below).
803 */
804 STATIC uint
805 xfs_calc_attrsetm_reservation(
806 struct xfs_mount *mp)
807 {
808 return XFS_DQUOT_LOGRES(mp) +
809 xfs_calc_inode_res(mp, 1) +
810 xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
811 xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
812 }
813
814 /*
815 * Setting an attribute at runtime, transaction space unit per block.
816 * the superblock for allocations: sector size
817 * the inode bmap btree could join or split: max depth * block size
818 * Since the runtime attribute transaction space is dependent on the total
819  * blocks needed for the 1st bmap, here we calculate the space unit for
820  * one block so that the caller can figure out the total space according
821  * to the attribute extent length in blocks by:
822 * ext * M_RES(mp)->tr_attrsetrt.tr_logres
823 */
824 STATIC uint
825 xfs_calc_attrsetrt_reservation(
826 struct xfs_mount *mp)
827 {
828 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
829 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
830 XFS_FSB_TO_B(mp, 1));
831 }
832
833 /*
834 * Removing an attribute.
835 * the inode: inode size
836 * the attribute btree could join: max depth * block size
837 * the inode bmap btree could join or split: max depth * block size
838 * And the bmap_finish transaction can free the attr blocks freed giving:
839 * the agf for the ag in which the blocks live: 2 * sector size
840 * the agfl for the ag in which the blocks live: 2 * sector size
841 * the superblock for the free block count: sector size
842 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
843 */
844 STATIC uint
845 xfs_calc_attrrm_reservation(
846 struct xfs_mount *mp)
847 {
848 return XFS_DQUOT_LOGRES(mp) +
849 max((xfs_calc_inode_res(mp, 1) +
850 xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
851 XFS_FSB_TO_B(mp, 1)) +
852 (uint)XFS_FSB_TO_B(mp,
853 XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
854 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
855 (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
856 xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
857 XFS_FSB_TO_B(mp, 1))));
858 }
859
860 /*
861 * Clearing a bad agino number in an agi hash bucket.
862 */
863 STATIC uint
864 xfs_calc_clear_agi_bucket_reservation(
865 struct xfs_mount *mp)
866 {
867 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
868 }
869
870 /*
871 * Adjusting quota limits.
872 * the disk quota buffer: sizeof(struct xfs_disk_dquot)
873 */
874 STATIC uint
875 xfs_calc_qm_setqlim_reservation(void)
876 {
877 return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
878 }
879
880 /*
881 * Allocating quota on disk if needed.
882 * the write transaction log space for quota file extent allocation
883 * the unit of quota allocation: one system block size
884 */
885 STATIC uint
886 xfs_calc_qm_dqalloc_reservation(
887 struct xfs_mount *mp,
888 bool for_minlogsize)
889 {
890 return xfs_calc_write_reservation(mp, for_minlogsize) +
891 xfs_calc_buf_res(1,
892 XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
893 }
894
895 unsigned int
896 xfs_calc_qm_dqalloc_reservation_minlogsize(
897 struct xfs_mount *mp)
898 {
899 return xfs_calc_qm_dqalloc_reservation(mp, true);
900 }
901
902 /*
903 * Syncing the incore super block changes to disk.
904 * the super block to reflect the changes: sector size
905 */
906 STATIC uint
907 xfs_calc_sb_reservation(
908 struct xfs_mount *mp)
909 {
910 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
911 }
912
913 void
914 xfs_trans_resv_calc(
915 struct xfs_mount *mp,
916 struct xfs_trans_resv *resp)
917 {
918 int logcount_adj = 0;
919
920 /*
921 * The following transactions are logged in physical format and
922 * require a permanent reservation on space.
923 */
924 resp->tr_write.tr_logres = xfs_calc_write_reservation(mp, false);
925 resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
926 resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
927
928 resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp, false);
929 resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
930 resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
931
932 resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
933 resp->tr_rename.tr_logcount = XFS_RENAME_LOG_COUNT;
934 resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
935
936 resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
937 resp->tr_link.tr_logcount = XFS_LINK_LOG_COUNT;
938 resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
939
940 resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
941 resp->tr_remove.tr_logcount = XFS_REMOVE_LOG_COUNT;
942 resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
943
944 resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
945 resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
946 resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
947
948 resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
949 resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
950 resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
951
952 resp->tr_create_tmpfile.tr_logres =
953 xfs_calc_create_tmpfile_reservation(mp);
954 resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
955 resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
956
957 resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
958 resp->tr_mkdir.tr_logcount = XFS_MKDIR_LOG_COUNT;
959 resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
960
961 resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
962 resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
963 resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
964
965 resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
966 resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
967 resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
968
969 resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
970 resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
971 resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
972
973 resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
974 resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
975 resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
976
977 resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
978 resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
979 resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
980
981 resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
982 resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
983 resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
984
985 resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp,
986 false);
987 resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
988 resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
989
990 /*
991 * The following transactions are logged in logical format with
992 * a default log count.
993 */
994 resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
995 resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
996
997 resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
998 resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;
999
1000 /* growdata requires permanent res; it can free space to the last AG */
1001 resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
1002 resp->tr_growdata.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
1003 resp->tr_growdata.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1004
1005         /* The following transactions are logged in logical format */
1006 resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
1007 resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
1008 resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
1009 resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
1010 resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
1011 resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
1012 resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);
1013
1014 /*
1015 * Add one logcount for BUI items that appear with rmap or reflink,
1016 * one logcount for refcount intent items, and one logcount for rmap
1017 * intent items.
1018 */
1019 if (xfs_has_reflink(mp) || xfs_has_rmapbt(mp))
1020 logcount_adj++;
1021 if (xfs_has_reflink(mp))
1022 logcount_adj++;
1023 if (xfs_has_rmapbt(mp))
1024 logcount_adj++;
1025
1026 resp->tr_itruncate.tr_logcount += logcount_adj;
1027 resp->tr_write.tr_logcount += logcount_adj;
1028 resp->tr_qm_dqalloc.tr_logcount += logcount_adj;
1029 }
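/*
 * Usage sketch (illustrative, not part of this file): callers pick up these
 * reservations through M_RES(mp), typically along the lines of
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
 *	if (error)
 *		return error;
 *
 * where resblks is the caller's block reservation. The tr_logres, tr_logcount
 * and tr_logflags values filled in by xfs_trans_resv_calc() determine the log
 * space reserved by xfs_trans_alloc().
 */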