struct xfs_buftarg *btp,
struct xfs_buf_map *map)
{
- xfs_daddr_t eofs;
-
/* Check for IOs smaller than the sector size / not sector aligned */
ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
/*
* Corrupted block numbers can get through to here, unfortunately, so we
* have to check that the buffer falls within the filesystem bounds.
*/
- eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
- if (map->bm_bn < 0 || map->bm_bn >= eofs) {
+ if (map->bm_bn < 0 || map->bm_bn >= btp->bt_nr_sectors) {
xfs_alert(btp->bt_mount,
"%s: daddr 0x%llx out of range, EOFS 0x%llx",
- __func__, map->bm_bn, eofs);
+ __func__, map->bm_bn, btp->bt_nr_sectors);
WARN_ON(1);
return -EFSCORRUPTED;
}
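
Aside: map->bm_bn and btp->bt_nr_sectors are both expressed in 512-byte
basic blocks (BBSHIFT is 9 in XFS), so the range check needs no unit
conversion. A minimal standalone sketch of the same check, illustrative
userspace code rather than anything in this patch:

    #include <stdint.h>

    #define BBSHIFT 9 /* XFS basic blocks are 512 bytes */

    /* Is daddr inside a fs of fs_blocks blocks of (1 << blocklog) bytes? */
    static int daddr_in_bounds(int64_t daddr, uint64_t fs_blocks,
            unsigned int blocklog)
    {
        int64_t nr_sectors = (int64_t)(fs_blocks << (blocklog - BBSHIFT));

        return daddr >= 0 && daddr < nr_sectors;
    }
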
int
xfs_configure_buftarg(
struct xfs_buftarg *btp,
- unsigned int sectorsize)
+ unsigned int sectorsize,
+ xfs_rfsblock_t nr_blocks)
{
- int error;
+ struct xfs_mount *mp = btp->bt_mount;
- ASSERT(btp->bt_bdev != NULL);
+ if (btp->bt_bdev) {
+ int error;
- /* Set up metadata sector size info */
- btp->bt_meta_sectorsize = sectorsize;
- btp->bt_meta_sectormask = sectorsize - 1;
+ error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
+ if (error) {
+ xfs_warn(mp,
+ "Cannot use blocksize %u on device %pg, err %d",
+ sectorsize, btp->bt_bdev, error);
+ return -EINVAL;
+ }
- error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
- if (error) {
- xfs_warn(btp->bt_mount,
- "Cannot use blocksize %u on device %pg, err %d",
- sectorsize, btp->bt_bdev, error);
- return -EINVAL;
+ if (bdev_can_atomic_write(btp->bt_bdev))
+ xfs_configure_buftarg_atomic_writes(btp);
}
- if (bdev_can_atomic_write(btp->bt_bdev))
- xfs_configure_buftarg_atomic_writes(btp);
+ btp->bt_meta_sectorsize = sectorsize;
+ btp->bt_meta_sectormask = sectorsize - 1;
+ /* m_blkbb_log is not set up yet */
+ btp->bt_nr_sectors = nr_blocks << (mp->m_sb.sb_blocklog - BBSHIFT);
return 0;
}
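
For context: the shift is open-coded because m_blkbb_log is derived from
the superblock in xfs_sb_mount_common() and is not yet set when this runs.
Assuming the usual macro definition, XFS_FSB_TO_BB(mp, fsb) ==
(fsb) << (mp)->m_blkbb_log with m_blkbb_log == sb_blocklog - BBSHIFT, the
assignment is equivalent to:

    btp->bt_nr_sectors = XFS_FSB_TO_BB(mp, nr_blocks);
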
size_t logical_sectorsize,
const char *descr)
{
+ /* The maximum size of the buftarg is only known once the sb is read. */
+ btp->bt_nr_sectors = (xfs_daddr_t)-1;
+
/* Set up device logical sector size mask */
btp->bt_logical_sectorsize = logical_sectorsize;
btp->bt_logical_sectormask = logical_sectorsize - 1;
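
Both mask fields depend on the sector size being a power of two, which
makes an alignment test a single AND; this is the same pattern the ASSERTs
in xfs_buf_map_verify() use with bt_meta_sectormask. A self-contained
illustration, not part of the patch:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t sectorsize = 512;            /* must be a power of two */
        size_t sectormask = sectorsize - 1; /* 0x1ff */

        assert((4096 & sectormask) == 0);   /* sector-aligned offset */
        assert((4100 & sectormask) != 0);   /* misaligned offset */
        return 0;
    }
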
size_t bt_meta_sectormask;
size_t bt_logical_sectorsize;
size_t bt_logical_sectormask;
+ xfs_daddr_t bt_nr_sectors;
/* LRU control structures */
struct shrinker *bt_shrinker;
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
-int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize);
+int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize,
+ xfs_rfsblock_t nr_blocks);
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
*/
xfs_sb_from_disk(&mp->m_sb, dsb);
+ /*
+ * Growfs can change the filesystem size. Mirror that into the buftarg.
+ */
+ mp->m_ddev_targp->bt_nr_sectors =
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
+ if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp) {
+ mp->m_rtdev_targp->bt_nr_sectors =
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
+ }
+
if (mp->m_sb.sb_agcount < orig_agcount) {
xfs_alert(mp, "Shrinking AG count in log recovery not supported");
return -EFSCORRUPTED;
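
To make the mirrored XFS_FSB_TO_BB() conversions above concrete: with
4 KiB filesystem blocks, sb_blocklog is 12 and each block covers
1 << (12 - 9) = 8 basic blocks. A worked example with assumed numbers:

    /* sb_dblocks = 0x100000 (4 GiB of 4 KiB data blocks):      */
    /* XFS_FSB_TO_BB(mp, 0x100000) == 0x100000 << 3 == 0x800000 */
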
{
int error;
- error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
+ error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize,
+ mp->m_sb.sb_dblocks);
if (error)
return error;
if (xfs_has_sector(mp))
log_sector_size = mp->m_sb.sb_logsectsize;
error = xfs_configure_buftarg(mp->m_logdev_targp,
- log_sector_size);
+ log_sector_size, mp->m_sb.sb_logblocks);
if (error)
return error;
}
mp->m_rtdev_targp = mp->m_ddev_targp;
} else if (mp->m_rtname) {
error = xfs_configure_buftarg(mp->m_rtdev_targp,
- mp->m_sb.sb_sectsize);
+ mp->m_sb.sb_sectsize, mp->m_sb.sb_rblocks);
if (error)
return error;
}
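
Read together, the call sites above pin down the lifecycle of
bt_nr_sectors across a mount; a rough sketch, where the ordering between
the named steps is inferred from this patch rather than verified against
the full call graph:

    /* xfs_init_buftarg():      bt_nr_sectors = (xfs_daddr_t)-1 placeholder */
    /* superblock read:         sb_dblocks / sb_logblocks / sb_rblocks known */
    /* xfs_configure_buftarg(): bt_nr_sectors = per-device size in sectors */
    /* growfs / log recovery:   bt_nr_sectors kept in sync with sb changes */
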
*/
STATIC void
xfs_trans_apply_sb_deltas(
- xfs_trans_t *tp)
+ struct xfs_trans *tp)
{
- struct xfs_dsb *sbp;
- struct xfs_buf *bp;
- int whole = 0;
-
- bp = xfs_trans_getsb(tp);
- sbp = bp->b_addr;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *bp = xfs_trans_getsb(tp);
+ struct xfs_dsb *sbp = bp->b_addr;
+ int whole = 0;
/*
* Only update the superblock counters if we are logging them
*/
- if (!xfs_has_lazysbcount((tp->t_mountp))) {
+ if (!xfs_has_lazysbcount(mp)) {
if (tp->t_icount_delta)
be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
if (tp->t_ifree_delta)
* write the correct value ondisk.
*/
if ((tp->t_frextents_delta || tp->t_res_frextents_delta) &&
- !xfs_has_rtgroups(tp->t_mountp)) {
- struct xfs_mount *mp = tp->t_mountp;
+ !xfs_has_rtgroups(mp)) {
int64_t rtxdelta;
rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;
if (tp->t_dblocks_delta) {
be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
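+ /*
+ * The data device size is changing here (growfs or shrink), so keep
+ * the buftarg bounds used by xfs_buf_map_verify() in sync.
+ */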
+ mp->m_ddev_targp->bt_nr_sectors +=
+ XFS_FSB_TO_BB(mp, tp->t_dblocks_delta);
whole = 1;
}
if (tp->t_agcount_delta) {
* recompute the ondisk rtgroup block log. The incore values
* will be recomputed in xfs_trans_unreserve_and_mod_sb.
*/
- if (xfs_has_rtgroups(tp->t_mountp)) {
+ if (xfs_has_rtgroups(mp)) {
sbp->sb_rgblklog = xfs_compute_rgblklog(
be32_to_cpu(sbp->sb_rgextents),
be32_to_cpu(sbp->sb_rextsize));
}
if (tp->t_rblocks_delta) {
be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
+ mp->m_rtdev_targp->bt_nr_sectors +=
+ XFS_FSB_TO_BB(mp, tp->t_rblocks_delta);
whole = 1;
}
if (tp->t_rextents_delta) {