From: Christoph Hellwig
Date: Mon, 29 Jul 2024 23:22:42 +0000 (-0700)
Subject: xfs: rework splitting of indirect block reservations
X-Git-Tag: v6.10.0~24^2~76
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0eec275070f9c73e6cd933b92e282b296c03d7c7;p=thirdparty%2Fxfsprogs-dev.git

xfs: rework splitting of indirect block reservations

Source kernel commit: da2b9c3a8d2cbdeec3f13cebf4c6c86c13e1077e

Move the check if we have enough indirect blocks and the stealing of
the deleted extent blocks out of xfs_bmap_split_indlen and into the
caller to prepare for handling delayed allocation of RT extents that
can't easily be stolen.

Signed-off-by: Christoph Hellwig
Reviewed-by: Dave Chinner
Reviewed-by: "Darrick J. Wong"
Signed-off-by: Chandan Babu R
---

diff --git a/libxfs/xfs_bmap.c b/libxfs/xfs_bmap.c
index 79cde87d..7b18477e 100644
--- a/libxfs/xfs_bmap.c
+++ b/libxfs/xfs_bmap.c
@@ -4825,31 +4825,17 @@ error0:
  * ores == 1). The number of stolen blocks is returned. The availability and
  * subsequent accounting of stolen blocks is the responsibility of the caller.
  */
-static xfs_filblks_t
+static void
 xfs_bmap_split_indlen(
 	xfs_filblks_t		ores,		/* original res. */
 	xfs_filblks_t		*indlen1,	/* ext1 worst indlen */
-	xfs_filblks_t		*indlen2,	/* ext2 worst indlen */
-	xfs_filblks_t		avail)		/* stealable blocks */
+	xfs_filblks_t		*indlen2)	/* ext2 worst indlen */
 {
 	xfs_filblks_t		len1 = *indlen1;
 	xfs_filblks_t		len2 = *indlen2;
 	xfs_filblks_t		nres = len1 + len2; /* new total res. */
-	xfs_filblks_t		stolen = 0;
 	xfs_filblks_t		resfactor;
 
-	/*
-	 * Steal as many blocks as we can to try and satisfy the worst case
-	 * indlen for both new extents.
-	 */
-	if (ores < nres && avail)
-		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
-	ores += stolen;
-
-	/* nothing else to do if we've satisfied the new reservation */
-	if (ores >= nres)
-		return stolen;
-
 	/*
 	 * We can't meet the total required reservation for the two extents.
 	 * Calculate the percent of the overall shortage between both extents
@@ -4894,8 +4880,6 @@ xfs_bmap_split_indlen(
 
 	*indlen1 = len1;
 	*indlen2 = len2;
-
-	return stolen;
 }
 
 int
@@ -4911,7 +4895,7 @@ xfs_bmap_del_extent_delay(
 	struct xfs_bmbt_irec	new;
 	int64_t			da_old, da_new, da_diff = 0;
 	xfs_fileoff_t		del_endoff, got_endoff;
-	xfs_filblks_t		got_indlen, new_indlen, stolen;
+	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
 	uint64_t		fdblocks;
 	int			error = 0;
@@ -4990,8 +4974,19 @@ xfs_bmap_del_extent_delay(
 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
 
 		WARN_ON_ONCE(!got_indlen || !new_indlen);
-		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
-				del->br_blockcount);
+		/*
+		 * Steal as many blocks as we can to try and satisfy the worst
+		 * case indlen for both new extents.
+		 */
+		da_new = got_indlen + new_indlen;
+		if (da_new > da_old) {
+			stolen = XFS_FILBLKS_MIN(da_new - da_old,
+					del->br_blockcount);
+			da_old += stolen;
+		}
+		if (da_new > da_old)
+			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
+		da_new = got_indlen + new_indlen;
 
 		got->br_startblock = nullstartblock((int)got_indlen);
 
@@ -5003,7 +4998,6 @@ xfs_bmap_del_extent_delay(
 		xfs_iext_next(ifp, icur);
 		xfs_iext_insert(ip, icur, &new, state);
 
-		da_new = got_indlen + new_indlen - stolen;
 		del->br_blockcount -= stolen;
 		break;
 	}
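
Illustration (not part of the patch): the standalone C sketch below shows the
division of labour this commit establishes. The caller steals blocks from the
deleted extent first, and only calls the split helper if the topped-up
reservation still falls short. The types, the simplified proportional math in
split_indlen() and the sample numbers are assumptions made for illustration
only; the real logic is xfs_bmap_del_extent_delay() and xfs_bmap_split_indlen()
as shown in the diff above.

#include <stdint.h>
#include <stdio.h>

#define FILBLKS_MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Simplified stand-in for xfs_bmap_split_indlen(): scale both worst-case
 * reservations down so their sum fits inside the original reservation.
 * The real helper splits the shortage proportionally using resfactor and
 * then hands out any remainder block by block.
 */
static void split_indlen(uint64_t ores, uint64_t *indlen1, uint64_t *indlen2)
{
	uint64_t nres = *indlen1 + *indlen2;	/* requested total */

	*indlen1 = *indlen1 * ores / nres;
	*indlen2 = ores - *indlen1;
}

int main(void)
{
	uint64_t da_old = 6;		/* original delalloc reservation */
	uint64_t del_blockcount = 1;	/* blocks covered by the deleted range */
	uint64_t got_indlen = 5;	/* worst-case indlen, left extent */
	uint64_t new_indlen = 4;	/* worst-case indlen, right extent */
	uint64_t stolen = 0;

	/* caller-side stealing, as moved out of the helper by this commit */
	uint64_t da_new = got_indlen + new_indlen;
	if (da_new > da_old) {
		stolen = FILBLKS_MIN(da_new - da_old, del_blockcount);
		da_old += stolen;
	}
	if (da_new > da_old)
		split_indlen(da_old, &got_indlen, &new_indlen);
	da_new = got_indlen + new_indlen;

	printf("stolen=%llu got_indlen=%llu new_indlen=%llu da_new=%llu\n",
	       (unsigned long long)stolen, (unsigned long long)got_indlen,
	       (unsigned long long)new_indlen, (unsigned long long)da_new);
	return 0;
}

Keeping the stealing in the caller matters because, per the commit message,
delayed allocations of RT extents cannot steal blocks this easily, so the
helper is reduced to pure reservation splitting.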