+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <sys/stat.h>
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode_buf.h"
#include "xfs_inode_fork.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_refcount_btree.h"
#include "libxfs.h" /* for now */
int use_xfs_buf_lock; /* global flag: use xfs_buf_t locks for MT */
-static void manage_zones(int); /* setup global zones */
-
-kmem_zone_t *xfs_inode_zone;
-
/*
* dev_map - map open devices to fd.
*/
#define MAX_DEVS 10 /* arbitary maximum */
-int nextfakedev = -1; /* device number to give to next fake device */
+static int nextfakedev = -1; /* device number to give to next fake device */
static struct dev_to_fd {
dev_t dev;
int fd;
static int
check_isactive(char *name, char *block, int fatal)
{
+	/*
+	 * Returns 1 only when @block is a mounted block device, the
+	 * platform writability check fires, and @fatal is set; every
+	 * other path returns 0.
+	 * NOTE(review): platform_check_iswritable()'s return convention
+	 * is not visible here -- nonzero presumably means the check
+	 * flagged a problem; confirm against the platform code.
+	 */
-	struct stat64	st;
+	struct stat	st;
-	if (stat64(block, &st) < 0)
+	if (stat(block, &st) < 0)
		return 0;
	if ((st.st_mode & S_IFMT) != S_IFBLK)
		return 0;
	if (platform_check_ismounted(name, block, &st, 0) == 0)
		return 0;
-	return platform_check_iswritable(name, block, &st, fatal);
+	if (platform_check_iswritable(name, block, &st))
+		return fatal ? 1 : 0;
+	return 0;
}
/* libxfs_device_to_fd:
dev_t dev;
int fd, d, flags;
int readonly, dio, excl;
- struct stat64 statb;
+ struct stat statb;
readonly = (xflags & LIBXFS_ISREADONLY);
excl = (xflags & LIBXFS_EXCLUSIVELY) && !creat;
exit(1);
}
- if (fstat64(fd, &statb) < 0) {
+ if (fstat(fd, &statb) < 0) {
fprintf(stderr, _("%s: cannot stat %s: %s\n"),
progname, path, strerror(errno));
exit(1);
int readonly = (flags & LIBXFS_ISREADONLY);
int inactive = (flags & LIBXFS_ISINACTIVE);
int dangerously = (flags & LIBXFS_DANGEROUSLY);
- struct stat64 stbuf;
+ struct stat stbuf;
- if (stat64(path, &stbuf) < 0) {
+ if (stat(path, &stbuf) < 0) {
perror(path);
return 0;
}
return 1;
}
+/*
+ * Initialize/destroy all of the zone allocators we use.
+ */
+static void
+init_zones(void)
+{
+	/*
+	 * initialise zone allocation
+	 * NOTE(review): kmem_zone_init() failure handling is not visible
+	 * here -- presumably it aborts on allocation failure; confirm
+	 * before relying on these pointers being non-NULL.
+	 */
+	xfs_buf_zone = kmem_zone_init(sizeof(struct xfs_buf), "xfs_buffer");
+	xfs_inode_zone = kmem_zone_init(sizeof(struct xfs_inode), "xfs_inode");
+	xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
+	xfs_ili_zone = kmem_zone_init(
+			sizeof(struct xfs_inode_log_item),"xfs_inode_log_item");
+	xfs_buf_item_zone = kmem_zone_init(
+			sizeof(struct xfs_buf_log_item), "xfs_buf_log_item");
+	xfs_da_state_zone = kmem_zone_init(
+			sizeof(struct xfs_da_state), "xfs_da_state");
+	xfs_btree_cur_zone = kmem_zone_init(
+			sizeof(struct xfs_btree_cur), "xfs_btree_cur");
+	xfs_bmap_free_item_zone = kmem_zone_init(
+			sizeof(struct xfs_extent_free_item),
+			"xfs_bmap_free_item");
+	xfs_trans_zone = kmem_zone_init(
+			sizeof(struct xfs_trans), "xfs_trans");
+}
+
+/*
+ * Tear down every zone created by init_zones().  The accumulated
+ * return value is treated by the caller as a leak indicator --
+ * presumably kmem_zone_destroy() returns the number of objects still
+ * allocated in the zone; confirm against the kmem implementation.
+ */
+static int
+destroy_zones(void)
+{
+	int	leaked = 0;
+
+	leaked += kmem_zone_destroy(xfs_buf_zone);
+	leaked += kmem_zone_destroy(xfs_ili_zone);
+	leaked += kmem_zone_destroy(xfs_inode_zone);
+	leaked += kmem_zone_destroy(xfs_ifork_zone);
+	leaked += kmem_zone_destroy(xfs_buf_item_zone);
+	leaked += kmem_zone_destroy(xfs_da_state_zone);
+	leaked += kmem_zone_destroy(xfs_btree_cur_zone);
+	leaked += kmem_zone_destroy(xfs_bmap_free_item_zone);
+	leaked += kmem_zone_destroy(xfs_trans_zone);
+
+	return leaked;
+}
+
/*
* libxfs initialization.
* Caller gets a 0 on failure (and we print a message), 1 on success.
libxfs_init(libxfs_init_t *a)
{
char *blockfile;
- char curdir[MAXPATHLEN];
char *dname;
char dpath[25];
int fd;
char *logname;
char logpath[25];
- int needcd;
char *rawfile;
char *rtname;
char rtpath[25];
rtname = a->rtname;
a->dfd = a->logfd = a->rtfd = -1;
a->ddev = a->logdev = a->rtdev = 0;
- a->dbsize = a->lbsize = a->rtbsize = 0;
- a->dsize = a->logBBsize = a->logBBstart = a->rtsize = 0;
+ a->dsize = a->lbsize = a->rtbsize = 0;
+ a->dbsize = a->logBBsize = a->logBBstart = a->rtsize = 0;
- (void)getcwd(curdir,MAXPATHLEN);
- needcd = 0;
fd = -1;
flags = (a->isreadonly | a->isdirect);
if (a->volname) {
if(!check_open(a->volname,flags,&rawfile,&blockfile))
goto done;
- needcd = 1;
fd = open(rawfile, O_RDONLY);
dname = a->dname = a->volname;
a->volname = NULL;
}
if (dname) {
- if (dname[0] != '/' && needcd)
- chdir(curdir);
if (a->disfile) {
a->ddev= libxfs_device_open(dname, a->dcreat, flags,
a->setblksize);
a->dfd = libxfs_device_to_fd(a->ddev);
+ platform_findsizes(dname, a->dfd, &a->dsize,
+ &a->dbsize);
} else {
if (!check_open(dname, flags, &rawfile, &blockfile))
goto done;
a->dcreat, flags, a->setblksize);
a->dfd = libxfs_device_to_fd(a->ddev);
platform_findsizes(rawfile, a->dfd,
- &a->dsize, &a->dbsize);
+ &a->dsize, &a->dbsize);
}
- needcd = 1;
} else
a->dsize = 0;
if (logname) {
- if (logname[0] != '/' && needcd)
- chdir(curdir);
if (a->lisfile) {
a->logdev = libxfs_device_open(logname,
a->lcreat, flags, a->setblksize);
a->logfd = libxfs_device_to_fd(a->logdev);
+ platform_findsizes(dname, a->logfd, &a->logBBsize,
+ &a->lbsize);
} else {
if (!check_open(logname, flags, &rawfile, &blockfile))
goto done;
a->lcreat, flags, a->setblksize);
a->logfd = libxfs_device_to_fd(a->logdev);
platform_findsizes(rawfile, a->logfd,
- &a->logBBsize, &a->lbsize);
+ &a->logBBsize, &a->lbsize);
}
- needcd = 1;
} else
a->logBBsize = 0;
if (rtname) {
- if (rtname[0] != '/' && needcd)
- chdir(curdir);
if (a->risfile) {
a->rtdev = libxfs_device_open(rtname,
a->rcreat, flags, a->setblksize);
a->rtfd = libxfs_device_to_fd(a->rtdev);
+ platform_findsizes(dname, a->rtfd, &a->rtsize,
+ &a->rtbsize);
} else {
if (!check_open(rtname, flags, &rawfile, &blockfile))
goto done;
a->rcreat, flags, a->setblksize);
a->rtfd = libxfs_device_to_fd(a->rtdev);
platform_findsizes(rawfile, a->rtfd,
- &a->rtsize, &a->rtbsize);
+ &a->rtsize, &a->rtbsize);
}
- needcd = 1;
} else
a->rtsize = 0;
if (a->dsize < 0) {
progname);
goto done;
}
- if (needcd)
- chdir(curdir);
if (!libxfs_bhash_size)
libxfs_bhash_size = LIBXFS_BHASHSIZE(sbp);
libxfs_bcache = cache_init(a->bcache_flags, libxfs_bhash_size,
&libxfs_bcache_operations);
use_xfs_buf_lock = a->usebuflock;
- manage_zones(0);
+ xfs_dir_startup();
+ init_zones();
rval = 1;
done:
if (dpath[0])
}
-/*
- * Initialize/destroy all of the zone allocators we use.
- */
-static void
-manage_zones(int release)
-{
- extern kmem_zone_t *xfs_buf_zone;
- extern kmem_zone_t *xfs_ili_zone;
- extern kmem_zone_t *xfs_ifork_zone;
- extern kmem_zone_t *xfs_buf_item_zone;
- extern kmem_zone_t *xfs_da_state_zone;
- extern kmem_zone_t *xfs_btree_cur_zone;
- extern kmem_zone_t *xfs_bmap_free_item_zone;
- extern kmem_zone_t *xfs_log_item_desc_zone;
- extern void xfs_dir_startup();
-
- if (release) { /* free zone allocation */
- kmem_free(xfs_buf_zone);
- kmem_free(xfs_inode_zone);
- kmem_free(xfs_ifork_zone);
- kmem_free(xfs_buf_item_zone);
- kmem_free(xfs_da_state_zone);
- kmem_free(xfs_btree_cur_zone);
- kmem_free(xfs_bmap_free_item_zone);
- kmem_free(xfs_log_item_desc_zone);
- return;
- }
- /* otherwise initialise zone allocation */
- xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buffer");
- xfs_inode_zone = kmem_zone_init(sizeof(struct xfs_inode), "xfs_inode");
- xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
- xfs_ili_zone = kmem_zone_init(
- sizeof(xfs_inode_log_item_t), "xfs_inode_log_item");
- xfs_buf_item_zone = kmem_zone_init(
- sizeof(xfs_buf_log_item_t), "xfs_buf_log_item");
- xfs_da_state_zone = kmem_zone_init(
- sizeof(xfs_da_state_t), "xfs_da_state");
- xfs_btree_cur_zone = kmem_zone_init(
- sizeof(xfs_btree_cur_t), "xfs_btree_cur");
- xfs_bmap_free_item_zone = kmem_zone_init(
- sizeof(xfs_bmap_free_item_t), "xfs_bmap_free_item");
- xfs_log_item_desc_zone = kmem_zone_init(
- sizeof(struct xfs_log_item_desc), "xfs_log_item_desc");
- xfs_dir_startup();
-}
-
/*
* Initialize realtime fields in the mount structure.
*/
* If we mount with the inode64 option, or no inode overflows
* the legacy 32-bit address space clear the inode32 option.
*/
- agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
+ agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
* the max inode percentage.
*/
if (mp->m_maxicount) {
- __uint64_t icount;
+ uint64_t icount;
icount = sbp->sb_dblocks * sbp->sb_imax_pct;
do_div(icount, 100);
if (maxagi)
*maxagi = index;
+
+ mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
out_unwind:
xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
xfs_ialloc_compute_maxlevels(mp);
+ xfs_rmapbt_compute_maxlevels(mp);
+ xfs_refcountbt_compute_maxlevels(mp);
if (sbp->sb_imax_pct) {
/* Make sure the maximum inode count is a multiple of the
* units we allocate inodes in.
*/
mp->m_maxicount = (sbp->sb_dblocks * sbp->sb_imax_pct) / 100;
- mp->m_maxicount = ((mp->m_maxicount / mp->m_ialloc_blks) *
- mp->m_ialloc_blks) << sbp->sb_inopblog;
+ mp->m_maxicount = XFS_FSB_TO_INO(mp,
+ (mp->m_maxicount / mp->m_ialloc_blks) *
+ mp->m_ialloc_blks);
} else
mp->m_maxicount = 0;
mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ int new_size = mp->m_inode_cluster_size;
+
+ new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
+ if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
+ mp->m_inode_cluster_size = new_size;
+ }
+ mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
+ mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);
+ mp->m_cluster_align = xfs_ialloc_cluster_alignment(mp);
+ mp->m_cluster_align_inodes = XFS_FSB_TO_INO(mp, mp->m_cluster_align);
/*
* Set whether we're using stripe alignment.
return NULL;
}
+ /*
+ * libxfs_initialize_perag will allocate a perag structure for each ag.
+ * If agcount is corrupted and insanely high, this will OOM the box.
+ * If the agount seems (arbitrarily) high, try to read what would be
+ * the last AG, and if that fails for a relatively high agcount, just
+ * read the first one and let the user know to check the geometry.
+ */
+ if (sbp->sb_agcount > 1000000) {
+ bp = libxfs_readbuf(mp->m_dev,
+ XFS_AG_DADDR(mp, sbp->sb_agcount - 1, 0), 1,
+ !(flags & LIBXFS_MOUNT_DEBUGGER), NULL);
+ if (bp->b_error) {
+ fprintf(stderr, _("%s: read of AG %u failed\n"),
+ progname, sbp->sb_agcount);
+ if (!(flags & LIBXFS_MOUNT_DEBUGGER))
+ return NULL;
+ fprintf(stderr, _("%s: limiting reads to AG 0\n"),
+ progname);
+ sbp->sb_agcount = 1;
+ }
+ libxfs_putbuf(bp);
+ }
+
error = libxfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
if (error) {
fprintf(stderr, _("%s: perag init failed\n"),
libxfs_rtmount_destroy(xfs_mount_t *mp)
{
+	/*
+	 * Release the realtime summary and bitmap inodes, if present,
+	 * then clear the mount pointers so they cannot be used again.
+	 * libxfs_irele() replaces the old IRELE() macro.
+	 */
	if (mp->m_rsumip)
-		IRELE(mp->m_rsumip);
+		libxfs_irele(mp->m_rsumip);
	if (mp->m_rbmip)
-		IRELE(mp->m_rbmip);
+		libxfs_irele(mp->m_rbmip);
	mp->m_rsumip = mp->m_rbmip = NULL;
}
pag = radix_tree_delete(&mp->m_perag_tree, agno);
kmem_free(pag);
}
+
+ kmem_free(mp->m_attr_geo);
+ kmem_free(mp->m_dir_geo);
+
+ kmem_free(mp->m_rtdev_targp);
+ if (mp->m_logdev_targp != mp->m_ddev_targp)
+ kmem_free(mp->m_logdev_targp);
+ kmem_free(mp->m_ddev_targp);
+
}
/*
void
libxfs_destroy(void)
{
-	manage_zones(1);
+	int	leaked;
+
+	/* Free everything from the buffer cache before freeing buffer zone */
+	libxfs_bcache_purge();
+	libxfs_bcache_free();
	cache_destroy(libxfs_bcache);
+	/*
+	 * Leak detection is opt-in: only abort the process when the
+	 * LIBXFS_LEAK_CHECK environment variable is set and objects
+	 * were still allocated at teardown.
+	 */
+	leaked = destroy_zones();
+	if (getenv("LIBXFS_LEAK_CHECK") && leaked)
+		exit(1);
}
int
int
libxfs_nproc(void)
{
-	return platform_nproc();
+	int	nr;
+
+	nr = platform_nproc();
+	/* clamp: never report fewer than one processor, even if the
+	 * platform query fails or returns a nonsense value */
+	if (nr < 1)
+		nr = 1;
+	return nr;
}
unsigned long