Sync user and kernel code: replace the atomicIncWithWrap() update of mp->m_agirotor with a new spinlock-protected helper xfs_ialloc_next_ag() (adding m_agirotor_lock and spin_lock/spin_unlock compatibility stubs), perform on-disk extent validation/conversion unconditionally instead of under ARCH_CONVERT ifdefs, and teach the test tool's 'f' option to request file creation rather than being ignored.
int m_bsize; /* fs logical block size */
xfs_agnumber_t m_agfrotor; /* last ag where space found */
xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
+ lock_t m_agirotor_lock;/* .. and lock protecting it */
xfs_agnumber_t m_maxagi; /* highest inode alloc group */
int m_ihsize; /* size of next field */
struct xfs_ihash *m_ihash; /* fs private inode hash table*/
int argc,
char **argv)
{
+ int fflag = 0;
int c;
progname = basename(argv[0]);
case 'd': /* directIO */
directio = 1;
break;
- case 'f': /* ignore */
+ case 'f': /* create */
+ fflag = 1;
break;
case 'p': /* progname */
progname = optarg;
usage();
fname = strdup(argv[optind]);
- if ((fdesc = openfile(fname, append, 0, directio,
+ if ((fdesc = openfile(fname, append, fflag, directio,
readonly, osync, trunc, realtime)) < 0)
exit(1);
#define mraccess(a) ((void) 0)
#define ismrlocked(a,b) 1
#define spinlock_init(a,b) ((void) 0)
+#define spin_lock(a) ((void) 0)
+#define spin_unlock(a) ((void) 0)
#define __return_address __builtin_return_address(0)
#define xfs_btree_reada_bufl(m,fsb,c) ((void) 0)
#define xfs_btree_reada_bufs(m,fsb,c,x) ((void) 0)
return 0;
}
+/*
+ * Return the current inode-allocation rotor value and advance it to
+ * the next allocation group, wrapping back to AG 0 once m_maxagi is
+ * reached.  m_agirotor_lock serializes concurrent callers so the
+ * read-increment-wrap sequence stays atomic (replaces the old
+ * atomicIncWithWrap() usage).
+ */
+STATIC __inline xfs_agnumber_t
+xfs_ialloc_next_ag(
+	xfs_mount_t	*mp)
+{
+	xfs_agnumber_t	agno;
+
+	spin_lock(&mp->m_agirotor_lock);
+	agno = mp->m_agirotor;
+	if (++mp->m_agirotor == mp->m_maxagi)
+		mp->m_agirotor = 0;
+	spin_unlock(&mp->m_agirotor_lock);
+
+	return agno;
+}
+
/*
* Select an allocation group to look for a free inode in, based on the parent
* inode and then mode. Return the allocation group buffer.
mp = tp->t_mountp;
agcount = mp->m_maxagi;
if (S_ISDIR(mode))
- pagno = atomicIncWithWrap((int *)&mp->m_agirotor, agcount);
+ pagno = xfs_ialloc_next_ag(mp);
else {
pagno = XFS_INO_TO_AGNO(mp, parent);
if (pagno >= agcount)
agbp = NULL;
if (!pag->pagi_inodeok) {
- atomicIncWithWrap((int *)&mp->m_agirotor, agcount);
+ xfs_ialloc_next_ag(mp);
goto unlock_nextag;
}
ifp->if_bytes = size;
ifp->if_real_bytes = real_size;
if (size) {
- xfs_validate_extents(
- (xfs_bmbt_rec_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT),
- nex, 1, XFS_EXTFMT_INODE(ip));
- dp = (xfs_bmbt_rec_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT);
+ dp = (xfs_bmbt_rec_t *)
+ XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT);
+ xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip));
ep = ifp->if_u1.if_extents;
-#if ARCH_CONVERT != ARCH_NOCONVERT
for (i = 0; i < nex; i++, ep++, dp++) {
ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
- ARCH_CONVERT);
+ ARCH_CONVERT);
ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
- ARCH_CONVERT);
+ ARCH_CONVERT);
}
-#else
- memcpy(ep, dp, size);
-#endif
xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
whichfork);
if (whichfork != XFS_DATA_FORK ||
continue;
}
-#if ARCH_CONVERT != ARCH_NOCONVERT
/* Translate to on disk format */
put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
- (__uint64_t*)&dest_ep->l0);
+ (__uint64_t*)&dest_ep->l0);
put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
- (__uint64_t*)&dest_ep->l1);
-#else
- *dest_ep = *ep;
-#endif
+ (__uint64_t*)&dest_ep->l1);
dest_ep++;
ep++;
copied++;
int i;
mp->m_agfrotor = mp->m_agirotor = 0;
+ spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
mp->m_maxagi = mp->m_sb.sb_agcount;
mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;