From: Nathan Scott
Date: Fri, 2 Dec 2005 02:51:40 +0000 (+0000)
Subject: Sort out cosmetic differences between user and kernel copies of some sources.
X-Git-Tag: v2.8.0~52
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6e3140c7c9f03fd0c6ee61e95801a93674de8be4;p=thirdparty%2Fxfsprogs-dev.git

Sort out cosmetic differences between user and kernel copies of some sources.

Merge of master-melb:xfs-cmds:24660a by kenmcd.
---
diff --git a/include/buildmacros b/include/buildmacros
index cb2a7448f..928f24f71 100644
--- a/include/buildmacros
+++ b/include/buildmacros
@@ -1,18 +1,5 @@
 #
-# Copyright (c) 2002-2005 Silicon Graphics, Inc.
-# All Rights Reserved.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+# Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
 #
 
 BUILDRULES = $(TOPDIR)/include/buildrules
diff --git a/include/buildrules b/include/buildrules
index 840f417b9..c530bcc06 100644
--- a/include/buildrules
+++ b/include/buildrules
@@ -1,20 +1,5 @@
 #
-# Copyright (c) 1999, 2001-2003, 2005 Silicon Graphics, Inc.
-# All Rights Reserved.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-# Common build rules for gmake
+# Copyright (c) 1999, 2001-2003 Silicon Graphics, Inc. All Rights Reserved.
# ifndef _BUILDRULES_INCLUDED_ _BUILDRULES_INCLUDED_ = 1 diff --git a/include/xfs_ag.h b/include/xfs_ag.h index 15bdbb398..a96e2ffce 100644 --- a/include/xfs_ag.h +++ b/include/xfs_ag.h @@ -199,8 +199,8 @@ typedef struct xfs_perag (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) #define XFS_MIN_FREELIST(a,mp) \ (XFS_MIN_FREELIST_RAW( \ - INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \ - INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp)) + be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \ + be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp)) #define XFS_MIN_FREELIST_PAG(pag,mp) \ (XFS_MIN_FREELIST_RAW( \ (uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ diff --git a/include/xfs_arch.h b/include/xfs_arch.h index 17d5e50ca..71de20ced 100644 --- a/include/xfs_arch.h +++ b/include/xfs_arch.h @@ -170,6 +170,21 @@ } \ } +static inline void be16_add(__be16 *a, __s16 b) +{ + *a = cpu_to_be16(be16_to_cpu(*a) + b); +} + +static inline void be32_add(__be32 *a, __s32 b) +{ + *a = cpu_to_be32(be32_to_cpu(*a) + b); +} + +static inline void be64_add(__be64 *a, __s64 b) +{ + *a = cpu_to_be64(be64_to_cpu(*a) + b); +} + /* * In directories inode numbers are stored as unaligned arrays of unsigned * 8bit integers on disk. @@ -184,7 +199,7 @@ * into 32bits a four-member array is used: * * |24-31|16-23| 8-15| 0- 7| - */ + */ #define XFS_GET_DIR_INO4(di) \ (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) diff --git a/include/xfs_bmap.h b/include/xfs_bmap.h index e879c6a46..12cc63dfc 100644 --- a/include/xfs_bmap.h +++ b/include/xfs_bmap.h @@ -105,7 +105,7 @@ typedef struct xfs_bmalloca { char wasdel; /* replacing a delayed allocation */ char userdata;/* set if is user data */ char low; /* low on space, using seq'l ags */ - char aeof; /* allocated space at eof */ + char aeof; /* allocated space at eof */ char conv; /* overwriting unwritten extents */ } xfs_bmalloca_t; diff --git a/include/xfs_bmap_btree.h b/include/xfs_bmap_btree.h index 66da08881..e095a2d34 100644 --- a/include/xfs_bmap_btree.h +++ b/include/xfs_bmap_btree.h @@ -211,36 +211,36 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #define XFS_BMAP_REC_DADDR(bb,i,cur) \ (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_REC_IADDR(bb,i,cur) \ (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_KEY_DADDR(bb,i,cur) \ (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_KEY_IADDR(bb,i,cur) \ (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_PTR_DADDR(bb,i,cur) \ (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ 
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_PTR_IADDR(bb,i,cur) \ (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + be16_to_cpu((bb)->bb_level), cur), \ xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ - INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) + be16_to_cpu((bb)->bb_level), cur))) /* * These are to be used when we know the size of the block and @@ -253,7 +253,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ (XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))) -#define XFS_BMAP_BROOT_NUMRECS(bb) INT_GET((bb)->bb_numrecs, ARCH_CONVERT) +#define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs) #define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ @@ -261,7 +261,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) #define XFS_BMAP_BROOT_SPACE(bb) \ - (XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT))) + (XFS_BMAP_BROOT_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs))) #define XFS_BMDR_SPACE_CALC(nrecs) \ (int)(sizeof(xfs_bmdr_block_t) + \ ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) @@ -272,10 +272,10 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)]) #define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ - (INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \ - INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \ - INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \ - INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0]) + (be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \ + be16_to_cpu((bb)->bb_level) == level && \ + be16_to_cpu((bb)->bb_numrecs) > 0 && \ + be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0]) #ifdef __KERNEL__ diff --git a/include/xfs_log_priv.h b/include/xfs_log_priv.h index 8f2851496..4518b188a 100644 --- a/include/xfs_log_priv.h +++ b/include/xfs_log_priv.h @@ -494,10 +494,8 @@ typedef struct log { #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) -#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ - xlog_grant_sub_space(log,bytes,type) -static inline void xlog_grant_sub_space(struct log *log, int bytes, int type) -{ +#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ + { \ if (type == 'w') { \ (log)->l_grant_write_bytes -= (bytes); \ if ((log)->l_grant_write_bytes < 0) { \ @@ -511,13 +509,9 @@ static inline void xlog_grant_sub_space(struct log *log, int bytes, int type) (log)->l_grant_reserve_cycle--; \ } \ } \ -} - -#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ - xlog_grant_add_space(log,bytes,type) -static inline void -xlog_grant_add_space(struct log *log, int bytes, int type) -{ + } +#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ + { \ if (type == 'w') { \ (log)->l_grant_write_bytes += (bytes); \ if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ @@ -531,12 +525,9 @@ xlog_grant_add_space(struct log *log, int bytes, int type) (log)->l_grant_reserve_cycle++; \ } \ } \ -} - -#define XLOG_INS_TICKETQ(q, tic) xlog_ins_ticketq(q, tic) -static inline void -xlog_ins_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) -{ \ + } +#define XLOG_INS_TICKETQ(q, tic) \ + { \ if (q) { \ (tic)->t_next = (q); \ (tic)->t_prev = (q)->t_prev; \ @@ -547,12 +538,9 @@ xlog_ins_ticketq(struct xlog_ticket *q, struct 
xlog_ticket *tic) (q) = (tic); \ } \ (tic)->t_flags |= XLOG_TIC_IN_Q; \ -} - -#define XLOG_DEL_TICKETQ(q, tic) xlog_del_ticketq(q, tic) -static inline void -xlog_del_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) -{ \ + } +#define XLOG_DEL_TICKETQ(q, tic) \ + { \ if ((tic) == (tic)->t_next) { \ (q) = NULL; \ } else { \ @@ -562,7 +550,7 @@ xlog_del_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) } \ (tic)->t_next = (tic)->t_prev = NULL; \ (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ -} + } /* common routines */ extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); diff --git a/include/xfs_sb.h b/include/xfs_sb.h index efb7073b8..abe78607e 100644 --- a/include/xfs_sb.h +++ b/include/xfs_sb.h @@ -68,19 +68,6 @@ struct xfs_mount; (XFS_SB_VERSION_NUMBITS | \ XFS_SB_VERSION_OKREALFBITS | \ XFS_SB_VERSION_OKSASHFBITS) -#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,logv2,attrv1,sflag,morebits) \ - (((ia) || (dia) || (extflag) || (dirv2) || (logv2) || (attrv1) || \ - (sflag) || (morebits)) ? \ - (XFS_SB_VERSION_4 | \ - ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \ - ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \ - ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \ - ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \ - ((logv2) ? XFS_SB_VERSION_LOGV2BIT : 0) | \ - ((attrv1) ? XFS_SB_VERSION_ATTRBIT : 0) | \ - ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \ - ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \ - XFS_SB_VERSION_1) /* * There are two words to hold XFS "feature" bits: the original @@ -90,10 +77,11 @@ struct xfs_mount; * These defines represent bits in sb_features2. */ #define XFS_SB_VERSION2_REALFBITS 0x00ffffff /* Mask: features */ -#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001 -#define XFS_SB_VERSION2_RESERVED2BIT 0x00000002 -#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 +#define XFS_SB_VERSION2_DONOTUSEBIT1 0x00000001 +#define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Lazy SB counters */ +#define XFS_SB_VERSION2_DONOTUSEBIT2 0x00000004 #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ +#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* Parent pointers */ #define XFS_SB_VERSION2_SASHFBITS 0xff000000 /* Mask: features that require changing PROM and SASH */ @@ -106,12 +94,6 @@ struct xfs_mount; (XFS_SB_VERSION2_OKREALFBITS | \ XFS_SB_VERSION2_OKSASHFBITS ) -/* - * mkfs macro to set up sb_features2 word - */ -#define XFS_SB_VERSION2_MKFS(resvd1, sbcntr, attrv2) \ - (((attrv2) ? 
XFS_SB_VERSION2_ATTR2BIT : 0)) - typedef struct xfs_sb { __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ diff --git a/include/xfs_trans.h b/include/xfs_trans.h index a889963fd..d77901c07 100644 --- a/include/xfs_trans.h +++ b/include/xfs_trans.h @@ -973,7 +973,6 @@ void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *); void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *); void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); -void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); diff --git a/io/Makefile b/io/Makefile index e7a5de068..3c857d36c 100644 --- a/io/Makefile +++ b/io/Makefile @@ -9,8 +9,8 @@ LTCOMMAND = xfs_io LSRCFILES = xfs_bmap.sh xfs_freeze.sh HFILES = init.h io.h CFILES = init.c \ - attr.c bmap.c file.c freeze.c fsync.c getrusage.c imap.c \ - mmap.c open.c pread.c prealloc.c pwrite.c truncate.c + attr.c bmap.c file.c freeze.c fsync.c getrusage.c imap.c mmap.c \ + open.c pread.c prealloc.c pwrite.c shutdown.c truncate.c LLDLIBS = $(LIBXCMD) LTDEPENDENCIES = $(LIBXCMD) @@ -45,13 +45,13 @@ LSRCFILES += sendfile.c endif ifeq ($(PKG_PLATFORM),irix) -LSRCFILES += inject.c resblks.c shutdown.c +LSRCFILES += inject.c resblks.c CFILES += parent.c LCFLAGS += -DHAVE_PARENT else LSRCFILES += parent.c -CFILES += inject.c resblks.c shutdown.c -LCFLAGS += -DHAVE_INJECT -DHAVE_RESBLKS -DHAVE_SHUTDOWN +CFILES += inject.c resblks.c +LCFLAGS += -DHAVE_INJECT -DHAVE_RESBLKS endif ifeq ($(ENABLE_READLINE),yes) diff --git a/libxfs/xfs_alloc.c b/libxfs/xfs_alloc.c index 1a66e0158..0e5699fb1 100644 --- a/libxfs/xfs_alloc.c +++ b/libxfs/xfs_alloc.c @@ -162,8 +162,8 @@ xfs_alloc_fix_minleft( if (args->minleft == 0) return 1; agf = XFS_BUF_TO_AGF(args->agbp); - diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT) - + INT_GET(agf->agf_flcount, ARCH_CONVERT) + diff = be32_to_cpu(agf->agf_freeblks) + + be32_to_cpu(agf->agf_flcount) - args->len - args->minleft; if (diff >= 0) return 1; @@ -238,7 +238,8 @@ xfs_alloc_fixup_trees( bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); XFS_WANT_CORRUPTED_RETURN( - INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT)); + be16_to_cpu(bnoblock->bb_numrecs) == + be16_to_cpu(cntblock->bb_numrecs)); } } #endif @@ -346,7 +347,8 @@ xfs_alloc_read_agfl( return 0; } -#if defined(XFS_ALLOC_TRACE) +#if 0 +//#if defined(XFS_ALLOC_TRACE) /* * Add an allocation trace entry for an alloc call. 
*/ @@ -424,21 +426,17 @@ xfs_alloc_trace_modagf( (void *)str, (void *)mp, (void *)(__psint_t)flags, - (void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO], - ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT], - ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO], - ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT], - ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT), - (void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT)); + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno). + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]). + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]). + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]). + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks), + (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest)); } #endif /* XFS_ALLOC_TRACE */ @@ -505,12 +503,12 @@ xfs_alloc_ag_vextent( if (!(args->wasfromfl)) { agf = XFS_BUF_TO_AGF(args->agbp); - INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len)); + be32_add(&agf->agf_freeblks, -(args->len)); xfs_trans_agblocks_delta(args->tp, -((long)(args->len))); args->pag->pagf_freeblks -= args->len; - ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT) - <= INT_GET(agf->agf_length, ARCH_CONVERT)); + ASSERT(be32_to_cpu(agf->agf_freeblks) <= + be32_to_cpu(agf->agf_length)); TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); xfs_alloc_log_agf(args->tp, args->agbp, XFS_AGF_FREEBLKS); @@ -616,8 +614,7 @@ xfs_alloc_ag_vextent_exact( cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, args->agno, XFS_BTNUM_CNT, NULL, 0); ASSERT(args->agbno + args->len <= - INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, - ARCH_CONVERT)); + be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); @@ -790,8 +787,7 @@ xfs_alloc_ag_vextent_near( goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); ltend = ltbno + ltlen; - ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, - ARCH_CONVERT)); + ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); args->len = blen; if (!xfs_alloc_fix_minleft(args)) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); @@ -1146,8 +1142,7 @@ xfs_alloc_ag_vextent_near( ltlen, <new); ASSERT(ltnew >= ltbno); ASSERT(ltnew + rlen <= ltend); - ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, - ARCH_CONVERT)); + ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); args->agbno = ltnew; if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, ltnew, rlen, XFSA_FIXUP_BNO_OK))) @@ -1310,8 +1305,7 @@ xfs_alloc_ag_vextent_size( args->agbno = rbno; XFS_WANT_CORRUPTED_GOTO( 
args->agbno + args->len <= - INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, - ARCH_CONVERT), + be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); TRACE_ALLOC("normal", args); return 0; @@ -1359,10 +1353,9 @@ xfs_alloc_ag_vextent_small( * freelist. */ else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && - (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount, - ARCH_CONVERT) > args->minleft)) { - error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno); - if (error) + (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) + > args->minleft)) { + if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) goto error0; if (fbno != NULLAGBLOCK) { if (args->userdata) { @@ -1376,8 +1369,7 @@ xfs_alloc_ag_vextent_small( args->agbno = fbno; XFS_WANT_CORRUPTED_GOTO( args->agbno + args->len <= - INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, - ARCH_CONVERT), + be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); args->wasfromfl = 1; TRACE_ALLOC("freelist", args); @@ -1651,12 +1643,12 @@ xfs_free_ag_extent( agf = XFS_BUF_TO_AGF(agbp); pag = &mp->m_perag[agno]; - INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len); + be32_add(&agf->agf_freeblks, len); xfs_trans_agblocks_delta(tp, len); pag->pagf_freeblks += len; XFS_WANT_CORRUPTED_GOTO( - INT_GET(agf->agf_freeblks, ARCH_CONVERT) - <= INT_GET(agf->agf_length, ARCH_CONVERT), + be32_to_cpu(agf->agf_freeblks) <= + be32_to_cpu(agf->agf_length), error0); TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); @@ -1804,18 +1796,18 @@ xfs_alloc_fix_freelist( */ agf = XFS_BUF_TO_AGF(agbp); need = XFS_MIN_FREELIST(agf, mp); - delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ? - (need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0; + delta = need > be32_to_cpu(agf->agf_flcount) ? + (need - be32_to_cpu(agf->agf_flcount)) : 0; /* * If there isn't enough total or single-extent, reject it. */ - longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + longest = be32_to_cpu(agf->agf_longest); longest = (longest > delta) ? (longest - delta) : - (INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0); + (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); if (args->minlen + args->alignment + args->minalignslop - 1 > longest || (args->minleft && - (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + - INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) < + (int)(be32_to_cpu(agf->agf_freeblks) + + be32_to_cpu(agf->agf_flcount) - need - args->total) < (int)args->minleft)) { xfs_trans_brelse(tp, agbp); args->agbp = NULL; @@ -1824,7 +1816,7 @@ xfs_alloc_fix_freelist( /* * Make the freelist shorter if it's too long. */ - while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) { + while (be32_to_cpu(agf->agf_flcount) > need) { xfs_buf_t *bp; if ((error = xfs_alloc_get_freelist(tp, agbp, &bno))) @@ -1851,9 +1843,9 @@ xfs_alloc_fix_freelist( /* * Make the freelist longer if it's too short. */ - while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) { + while (be32_to_cpu(agf->agf_flcount) < need) { targs.agbno = 0; - targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT); + targs.maxlen = need - be32_to_cpu(agf->agf_flcount); /* * Allocate as many blocks as possible at once. */ @@ -1913,19 +1905,19 @@ xfs_alloc_get_freelist( */ mp = tp->t_mountp; if ((error = xfs_alloc_read_agfl(mp, tp, - INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + be32_to_cpu(agf->agf_seqno), &agflbp))) return error; agfl = XFS_BUF_TO_AGFL(agflbp); /* * Get the block number and update the data structures. 
*/ - bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT); - INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1); + bno = INT_GET(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)], ARCH_CONVERT); + be32_add(&agf->agf_flfirst, 1); xfs_trans_brelse(tp, agflbp); - if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) agf->agf_flfirst = 0; - pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; - INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1); + pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; + be32_add(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); pag->pagf_flcount--; TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); @@ -1940,7 +1932,7 @@ xfs_alloc_get_freelist( * the freeing transaction must be pushed to disk NOW by forcing * to disk all iclogs up that transaction's LSN. */ - xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1); return 0; } @@ -2018,18 +2010,18 @@ xfs_alloc_put_freelist( mp = tp->t_mountp; if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, - INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + be32_to_cpu(agf->agf_seqno), &agflbp))) return error; agfl = XFS_BUF_TO_AGFL(agflbp); - INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1); - if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + be32_add(&agf->agf_fllast, 1); + if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) agf->agf_fllast = 0; - pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; - INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1); + pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; + be32_add(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); pag->pagf_flcount++; - ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp)); - blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)]; + ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); + blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; INT_SET(*blockp, ARCH_CONVERT, bno); TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); @@ -2076,14 +2068,12 @@ xfs_alloc_read_agf( */ agf = XFS_BUF_TO_AGF(bp); agf_ok = - INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC && - XFS_AGF_GOOD_VERSION( - INT_GET(agf->agf_versionnum, ARCH_CONVERT)) && - INT_GET(agf->agf_freeblks, ARCH_CONVERT) <= - INT_GET(agf->agf_length, ARCH_CONVERT) && - INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && - INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && - INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp); + be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC && + XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && + be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && + be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && + be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && + be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp); if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, XFS_RANDOM_ALLOC_READ_AGF))) { XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", @@ -2093,13 +2083,13 @@ xfs_alloc_read_agf( } pag = &mp->m_perag[agno]; if (!pag->pagf_init) { - pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT); - pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT); - pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); + pag->pagf_flcount = 
be32_to_cpu(agf->agf_flcount); + pag->pagf_longest = be32_to_cpu(agf->agf_longest); pag->pagf_levels[XFS_BTNUM_BNOi] = - INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT); + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); pag->pagf_levels[XFS_BTNUM_CNTi] = - INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT); + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); spinlock_init(&pag->pagb_lock, "xfspagb"); pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * sizeof(xfs_perag_busy_t), KM_SLEEP); @@ -2107,13 +2097,13 @@ xfs_alloc_read_agf( } #ifdef DEBUG else if (!XFS_FORCED_SHUTDOWN(mp)) { - ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT)); - ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT)); - ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT)); + ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); + ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); + ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == - INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT)); + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi])); ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == - INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT)); + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); } #endif XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); @@ -2362,7 +2352,7 @@ xfs_free_extent( #ifdef DEBUG ASSERT(args.agbp != NULL); agf = XFS_BUF_TO_AGF(args.agbp); - ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT)); + ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); #endif error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); diff --git a/libxfs/xfs_alloc_btree.c b/libxfs/xfs_alloc_btree.c index 921b2155b..fdb78ac54 100644 --- a/libxfs/xfs_alloc_btree.c +++ b/libxfs/xfs_alloc_btree.c @@ -79,7 +79,7 @@ xfs_alloc_delrec( /* * Fail if we're off the end of the block. 
*/ - if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (ptr > be16_to_cpu(block->bb_numrecs)) { *stat = 0; return 0; } @@ -93,18 +93,18 @@ xfs_alloc_delrec( lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); #ifdef DEBUG - for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) return error; } #endif - if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (ptr < be16_to_cpu(block->bb_numrecs)) { memmove(&lkp[ptr - 1], &lkp[ptr], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ + (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp)); memmove(&lpp[ptr - 1], &lpp[ptr], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ - xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); - xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp)); + xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); + xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); } } /* @@ -113,25 +113,25 @@ xfs_alloc_delrec( */ else { lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); - if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (ptr < be16_to_cpu(block->bb_numrecs)) { memmove(&lrp[ptr - 1], &lrp[ptr], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); - xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); } /* * If it's the first record in the block, we'll need a key * structure to pass up to the next level (updkey). */ if (ptr == 1) { - key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ - key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ + key.ar_startblock = lrp->ar_startblock; + key.ar_blockcount = lrp->ar_blockcount; lkp = &key; } } /* * Decrement and log the number of entries in the block. */ - INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); + be16_add(&block->bb_numrecs, -1); xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); /* * See if the longest free extent in the allocation group was @@ -144,24 +144,24 @@ xfs_alloc_delrec( if (level == 0 && cur->bc_btnum == XFS_BTNUM_CNT && - INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && - ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { - ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); + be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && + ptr > be16_to_cpu(block->bb_numrecs)) { + ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1); /* * There are still records in the block. Grab the size * from the last one. */ - if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { - rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); - INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); + if (be16_to_cpu(block->bb_numrecs)) { + rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur); + agf->agf_longest = rrp->ar_blockcount; } /* * No free extents left. 
*/ else agf->agf_longest = 0; - mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = - INT_GET(agf->agf_longest, ARCH_CONVERT); + mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest = + be32_to_cpu(agf->agf_longest); xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } @@ -175,15 +175,15 @@ xfs_alloc_delrec( * and it's NOT the leaf level, * then we can get rid of this level. */ - if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { + if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) { /* * lpp is still set to the first pointer in the block. * Make it the new root of the btree. */ - bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); - INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); - INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); - mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; + bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); + agf->agf_roots[cur->bc_btnum] = *lpp; + be32_add(&agf->agf_levels[cur->bc_btnum], -1); + mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; /* * Put this buffer/block on the ag's freelist. */ @@ -205,7 +205,7 @@ xfs_alloc_delrec( * that freed the block. */ xfs_alloc_mark_busy(cur->bc_tp, - INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + be32_to_cpu(agf->agf_seqno), bno, 1); xfs_trans_agbtree_delta(cur->bc_tp, -1); xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, @@ -231,7 +231,7 @@ xfs_alloc_delrec( * If the number of records remaining in the block is at least * the minimum, we're done. */ - if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) return error; *stat = 1; @@ -242,8 +242,8 @@ xfs_alloc_delrec( * tree balanced. Look at the left and right sibling blocks to * see if we can re-balance by moving only one record. */ - rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); - lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + rbno = be32_to_cpu(block->bb_rightsib); + lbno = be32_to_cpu(block->bb_leftsib); bno = NULLAGBLOCK; ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); /* @@ -280,18 +280,18 @@ xfs_alloc_delrec( /* * Grab the current block number, for future use. */ - bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + bno = be32_to_cpu(right->bb_leftsib); /* * If right block is full enough so that removing one entry * won't make it too empty, and left-shifting an entry out * of right to us works, we're done. */ - if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + if (be16_to_cpu(right->bb_numrecs) - 1 >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { if ((error = xfs_alloc_lshift(tcur, level, &i))) goto error0; if (i) { - ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)); xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); @@ -308,7 +308,7 @@ xfs_alloc_delrec( * future reference, and fix up the temp cursor to point * to our block again (last record). */ - rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + rrecs = be16_to_cpu(right->bb_numrecs); if (lbno != NULLAGBLOCK) { i = xfs_btree_firstrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); @@ -344,18 +344,18 @@ xfs_alloc_delrec( /* * Grab the current block number, for future use. 
*/ - bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + bno = be32_to_cpu(left->bb_rightsib); /* * If left block is full enough so that removing one entry * won't make it too empty, and right-shifting an entry out * of left to us works, we're done. */ - if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + if (be16_to_cpu(left->bb_numrecs) - 1 >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { if ((error = xfs_alloc_rshift(tcur, level, &i))) goto error0; if (i) { - ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)); xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); @@ -369,7 +369,7 @@ xfs_alloc_delrec( * Otherwise, grab the number of records in right for * future reference. */ - lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + lrecs = be16_to_cpu(left->bb_numrecs); } /* * Delete the temp cursor, we're done with it. @@ -383,7 +383,7 @@ xfs_alloc_delrec( * See if we can join with the left neighbor block. */ if (lbno != NULLAGBLOCK && - lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { /* * Set "right" to be the starting block, * "left" to be the left neighbor. @@ -403,7 +403,7 @@ xfs_alloc_delrec( * If that won't work, see if we can join with the right neighbor block. */ else if (rbno != NULLAGBLOCK && - rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + rrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { /* * Set "left" to be the starting block, @@ -438,31 +438,34 @@ xfs_alloc_delrec( /* * It's a non-leaf. Move keys and pointers. */ - lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); - lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); + lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); #ifdef DEBUG - for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) return error; } #endif - memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ - memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ - xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, - INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); - xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, - INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp)); + memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp)); + xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, + be16_to_cpu(left->bb_numrecs) + + be16_to_cpu(right->bb_numrecs)); + xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, + be16_to_cpu(left->bb_numrecs) + + be16_to_cpu(right->bb_numrecs)); } else { /* * It's a leaf. Move records. 
*/ - lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); - xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, - INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, + be16_to_cpu(left->bb_numrecs) + + be16_to_cpu(right->bb_numrecs)); } /* * If we joined with the left neighbor, set the buffer in the @@ -470,7 +473,7 @@ xfs_alloc_delrec( */ if (bp != lbp) { xfs_btree_setbuf(cur, level, lbp); - cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); + cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs); } /* * If we joined with the right neighbor and there's a level above @@ -482,28 +485,28 @@ xfs_alloc_delrec( /* * Fix up the number of records in the surviving block. */ - INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs)); /* * Fix up the right block pointer in the surviving block, and log it. */ - left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + left->bb_rightsib = right->bb_rightsib; xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); /* * If there is a right sibling now, make it point to the * remaining block. */ - if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { xfs_alloc_block_t *rrblock; xfs_buf_t *rrbp; if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, &rrbp, XFS_ALLOC_BTREE_REF))) return error; rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) return error; - INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + rrblock->bb_leftsib = cpu_to_be32(lbno); xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); } /* @@ -524,10 +527,9 @@ xfs_alloc_delrec( * busy block is allocated, the iclog is pushed up to the * LSN that freed the block. */ - xfs_alloc_mark_busy(cur->bc_tp, - INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); - + xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); xfs_trans_agbtree_delta(cur->bc_tp, -1); + /* * Adjust the current level's cursor so that we're left referring * to the right node, after we're done. @@ -575,7 +577,15 @@ xfs_alloc_insrec( int ptr; /* index in btree block for this rec */ xfs_alloc_rec_t *rp; /* pointer to btree records */ - ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); + ASSERT(be32_to_cpu(recp->ar_blockcount) > 0); + + /* + * GCC doesn't understand the (arguably complex) control flow in + * this function and complains about uninitialized structure fields + * without this. + */ + memset(&nrec, 0, sizeof(nrec)); + /* * If we made it to the root level, allocate a new root block * and we're done. @@ -591,8 +601,8 @@ xfs_alloc_insrec( /* * Make a key out of the record data to be inserted, and save it. 
*/ - key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ - key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ + key.ar_startblock = recp->ar_startblock; + key.ar_blockcount = recp->ar_blockcount; optr = ptr = cur->bc_ptrs[level]; /* * If we're off the left edge, return failure. @@ -613,7 +623,7 @@ xfs_alloc_insrec( /* * Check that the new entry is being inserted in the right place. */ - if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (ptr <= be16_to_cpu(block->bb_numrecs)) { if (level == 0) { rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); xfs_btree_check_rec(cur->bc_btnum, recp, rp); @@ -629,7 +639,7 @@ xfs_alloc_insrec( * If the block is full, we can't insert the new entry until we * make the block un-full. */ - if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { /* * First, try shifting an entry to the right neighbor. */ @@ -666,8 +676,8 @@ xfs_alloc_insrec( return error; #endif ptr = cur->bc_ptrs[level]; - nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ - nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ + nrec.ar_startblock = nkey.ar_startblock; + nrec.ar_blockcount = nkey.ar_blockcount; } /* * Otherwise the insert fails. @@ -691,15 +701,15 @@ xfs_alloc_insrec( kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); #ifdef DEBUG - for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) return error; } #endif memmove(&kp[ptr], &kp[ptr - 1], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ + (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp)); memmove(&pp[ptr], &pp[ptr - 1], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ + (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp)); #ifdef DEBUG if ((error = xfs_btree_check_sptr(cur, *bnop, level))) return error; @@ -708,12 +718,12 @@ xfs_alloc_insrec( * Now stuff the new data in, bump numrecs and log the new data. */ kp[ptr - 1] = key; - INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); - INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); - xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); - xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + pp[ptr - 1] = cpu_to_be32(*bnop); + be16_add(&block->bb_numrecs, 1); + xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); + xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); #ifdef DEBUG - if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (ptr < be16_to_cpu(block->bb_numrecs)) xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, kp + ptr); #endif @@ -723,16 +733,16 @@ xfs_alloc_insrec( */ rp = XFS_ALLOC_REC_ADDR(block, 1, cur); memmove(&rp[ptr], &rp[ptr - 1], - (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); + (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp)); /* * Now stuff the new record in, bump numrecs * and log the new data. 
*/ rp[ptr - 1] = *recp; /* INT_: struct copy */ - INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); - xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + be16_add(&block->bb_numrecs, 1); + xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); #ifdef DEBUG - if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (ptr < be16_to_cpu(block->bb_numrecs)) xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, rp + ptr); #endif @@ -754,16 +764,16 @@ xfs_alloc_insrec( agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); if (level == 0 && cur->bc_btnum == XFS_BTNUM_CNT && - INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && - INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { + be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && + be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) { /* * If this is a leaf in the by-size btree and there * is no right sibling block and this block is bigger * than the previous longest block, update it. */ - INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); - cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest - = INT_GET(recp->ar_blockcount, ARCH_CONVERT); + agf->agf_longest = recp->ar_blockcount; + cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest + = be32_to_cpu(recp->ar_blockcount); xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } @@ -873,8 +883,9 @@ xfs_alloc_log_recs( agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) - ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= - INT_GET(agf->agf_length, ARCH_CONVERT)); + ASSERT(be32_to_cpu(p->ar_startblock) + + be32_to_cpu(p->ar_blockcount) <= + be32_to_cpu(agf->agf_length)); } #endif first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); @@ -911,8 +922,8 @@ xfs_alloc_lookup( xfs_agf_t *agf; /* a.g. freespace header */ agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); - agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); + agno = be32_to_cpu(agf->agf_seqno); + agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); } /* * Iterate over each level in the btree, starting at the root. @@ -979,7 +990,7 @@ xfs_alloc_lookup( * Set low and high entry numbers, 1-based. */ low = 1; - if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + if (!(high = be16_to_cpu(block->bb_numrecs))) { /* * If the block is empty, the tree must * be an empty leaf. @@ -1008,14 +1019,14 @@ xfs_alloc_lookup( xfs_alloc_key_t *kkp; kkp = kkbase + keyno - 1; - startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); - blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); + startblock = be32_to_cpu(kkp->ar_startblock); + blockcount = be32_to_cpu(kkp->ar_blockcount); } else { xfs_alloc_rec_t *krp; krp = krbase + keyno - 1; - startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); - blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); + startblock = be32_to_cpu(krp->ar_startblock); + blockcount = be32_to_cpu(krp->ar_blockcount); } /* * Compute difference to get next direction. 
@@ -1055,7 +1066,7 @@ xfs_alloc_lookup( */ if (diff > 0 && --keyno < 1) keyno = 1; - agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); + agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur)); #ifdef DEBUG if ((error = xfs_btree_check_sptr(cur, agbno, level))) return error; @@ -1074,8 +1085,8 @@ xfs_alloc_lookup( * not the last block, we're in the wrong block. */ if (dir == XFS_LOOKUP_GE && - keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && - INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + keyno > be16_to_cpu(block->bb_numrecs) && + be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { int i; cur->bc_ptrs[0] = keyno; @@ -1092,7 +1103,7 @@ xfs_alloc_lookup( /* * Return if we succeeded or not. */ - if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) *stat = 0; else *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); @@ -1135,7 +1146,7 @@ xfs_alloc_lshift( /* * If we've got no left sibling then we can't shift an entry left. */ - if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { *stat = 0; return 0; } @@ -1151,8 +1162,8 @@ xfs_alloc_lshift( * Set up the left neighbor as "left". */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, - XFS_ALLOC_BTREE_REF))) + cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), + 0, &lbp, XFS_ALLOC_BTREE_REF))) return error; left = XFS_BUF_TO_ALLOC_BLOCK(lbp); if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) @@ -1160,11 +1171,11 @@ xfs_alloc_lshift( /* * If it's full, it can't take another entry. */ - if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { *stat = 0; return 0; } - nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + nrec = be16_to_cpu(left->bb_numrecs) + 1; /* * If non-leaf, copy a key and a ptr to the left block. */ @@ -1179,7 +1190,7 @@ xfs_alloc_lshift( lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); #ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) return error; #endif *lpp = *rpp; /* INT_: copy */ @@ -1201,30 +1212,30 @@ xfs_alloc_lshift( /* * Bump and log left's numrecs, decrement and log right's numrecs. */ - INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + be16_add(&left->bb_numrecs, 1); xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); - INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + be16_add(&right->bb_numrecs, -1); xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); /* * Slide the contents of right down one entry. 
*/ if (level > 0) { #ifdef DEBUG - for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), level))) return error; } #endif - memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); - memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); - xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); - xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); + memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); + xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); + xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); } else { - memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); - xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); - key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ - key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); + key.ar_startblock = rrp->ar_startblock; + key.ar_blockcount = rrp->ar_blockcount; rkp = &key; } /* @@ -1289,9 +1300,9 @@ xfs_alloc_newroot( xfs_agnumber_t seqno; agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); - INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); - seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); + be32_add(&agf->agf_levels[cur->bc_btnum], 1); + seqno = be32_to_cpu(agf->agf_seqno); mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); @@ -1308,12 +1319,12 @@ xfs_alloc_newroot( if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) return error; #endif - if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { /* * Our block is left, pick up the right block. */ lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); - rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + rbno = be32_to_cpu(left->bb_rightsib); if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, rbno, 0, &rbp, XFS_ALLOC_BTREE_REF))) @@ -1330,7 +1341,7 @@ xfs_alloc_newroot( rbp = lbp; right = left; rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); - lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + lbno = be32_to_cpu(right->bb_leftsib); if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, lbno, 0, &lbp, XFS_ALLOC_BTREE_REF))) @@ -1344,11 +1355,11 @@ xfs_alloc_newroot( /* * Fill in the new block's btree header and log it. 
*/ - INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); - INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); - INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); - INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); - INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); + new->bb_level = cpu_to_be16(cur->bc_nlevels); + new->bb_numrecs = cpu_to_be16(2); + new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); + new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); /* @@ -1358,18 +1369,18 @@ xfs_alloc_newroot( xfs_alloc_key_t *kp; /* btree key pointer */ kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); - if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { + if (be16_to_cpu(left->bb_level) > 0) { kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ } else { xfs_alloc_rec_t *rp; /* btree record pointer */ rp = XFS_ALLOC_REC_ADDR(left, 1, cur); - kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ - kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + kp[0].ar_startblock = rp->ar_startblock; + kp[0].ar_blockcount = rp->ar_blockcount; rp = XFS_ALLOC_REC_ADDR(right, 1, cur); - kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ - kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + kp[1].ar_startblock = rp->ar_startblock; + kp[1].ar_blockcount = rp->ar_blockcount; } } xfs_alloc_log_keys(cur, nbp, 1, 2); @@ -1380,8 +1391,8 @@ xfs_alloc_newroot( xfs_alloc_ptr_t *pp; /* btree address pointer */ pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); - INT_SET(pp[0], ARCH_CONVERT, lbno); - INT_SET(pp[1], ARCH_CONVERT, rbno); + pp[0] = cpu_to_be32(lbno); + pp[1] = cpu_to_be32(rbno); } xfs_alloc_log_ptrs(cur, nbp, 1, 2); /* @@ -1426,7 +1437,7 @@ xfs_alloc_rshift( /* * If we've got no right sibling then we can't shift an entry right. */ - if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { *stat = 0; return 0; } @@ -1434,7 +1445,7 @@ xfs_alloc_rshift( * If the cursor entry is the one that would be moved, don't * do it... it's too complicated. */ - if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { *stat = 0; return 0; } @@ -1442,8 +1453,8 @@ xfs_alloc_rshift( * Set up the right neighbor as "right". */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, - XFS_ALLOC_BTREE_REF))) + cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), + 0, &rbp, XFS_ALLOC_BTREE_REF))) return error; right = XFS_BUF_TO_ALLOC_BLOCK(rbp); if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) @@ -1451,7 +1462,7 @@ xfs_alloc_rshift( /* * If it's full, it can't take another entry. 
*/ - if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { *stat = 0; return 0; } @@ -1464,47 +1475,47 @@ xfs_alloc_rshift( xfs_alloc_ptr_t *lpp; /* address pointer for left block */ xfs_alloc_ptr_t *rpp; /* address pointer for right block */ - lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); - lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); + lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); #ifdef DEBUG - for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) return error; } #endif - memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); - memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); + memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); #ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) return error; #endif *rkp = *lkp; /* INT_: copy */ *rpp = *lpp; /* INT_: copy */ - xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); - xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); + xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); } else { xfs_alloc_rec_t *lrp; /* record pointer for left block */ xfs_alloc_rec_t *rrp; /* record pointer for right block */ - lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); *rrp = *lrp; - xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); - key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ - key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); + key.ar_startblock = rrp->ar_startblock; + key.ar_blockcount = rrp->ar_blockcount; rkp = &key; xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); } /* * Decrement and log left's numrecs, bump and log right's numrecs. */ - INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + be16_add(&left->bb_numrecs, -1); xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); - INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + be16_add(&right->bb_numrecs, 1); xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); /* * Using a temporary cursor, update the parent key values of the @@ -1577,17 +1588,17 @@ xfs_alloc_split( /* * Fill in the btree header for the new block. 
*/ - INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); - right->bb_level = left->bb_level; /* INT_: direct copy */ - INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); + right->bb_level = left->bb_level; + right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); /* * Make sure that if there's an odd number of entries now, that * each new block will have the same number of entries. */ - if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && - cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) - INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); - i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + if ((be16_to_cpu(left->bb_numrecs) & 1) && + cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) + be16_add(&right->bb_numrecs, 1); + i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; /* * For non-leaf blocks, copy keys and addresses over to the new block. */ @@ -1602,15 +1613,15 @@ xfs_alloc_split( rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); #ifdef DEBUG - for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) return error; } #endif - memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ - memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ - xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); - xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); + memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); + xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); + xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); *keyp = *rkp; } /* @@ -1622,38 +1633,38 @@ xfs_alloc_split( lrp = XFS_ALLOC_REC_ADDR(left, i, cur); rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); - xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); - keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ - keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); + keyp->ar_startblock = rrp->ar_startblock; + keyp->ar_blockcount = rrp->ar_blockcount; } /* * Find the left block number by looking in the buffer. * Adjust numrecs, sibling pointers. 
*/ lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); - INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); - right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ - INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); - INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); + right->bb_rightsib = left->bb_rightsib; + left->bb_rightsib = cpu_to_be32(rbno); + right->bb_leftsib = cpu_to_be32(lbno); xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); /* * If there's a block to the new block's right, make that block * point back to right instead of to left. */ - if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { xfs_alloc_block_t *rrblock; /* rr btree block */ xfs_buf_t *rrbp; /* buffer for rrblock */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, + cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0, &rrbp, XFS_ALLOC_BTREE_REF))) return error; rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) return error; - INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); + rrblock->bb_leftsib = cpu_to_be32(rbno); xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); } /* @@ -1661,9 +1672,9 @@ xfs_alloc_split( * If it's just pointing past the last entry in left, then we'll * insert there, so don't change anything in that case. */ - if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { xfs_btree_setbuf(cur, level, rbp); - cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); } /* * If there are more levels, we'll need another cursor which refers to @@ -1761,7 +1772,7 @@ xfs_alloc_decrement( /* * If we just went off the left edge of the tree, return failure. */ - if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { *stat = 0; return 0; } @@ -1790,7 +1801,7 @@ xfs_alloc_decrement( xfs_agblock_t agbno; /* block number of btree block */ xfs_buf_t *bp; /* buffer pointer for block */ - agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno, agbno, 0, &bp, XFS_ALLOC_BTREE_REF))) @@ -1800,7 +1811,7 @@ xfs_alloc_decrement( block = XFS_BUF_TO_ALLOC_BLOCK(bp); if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) return error; - cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); } *stat = 1; return 0; @@ -1867,7 +1878,7 @@ xfs_alloc_get_rec( /* * Off the right end or left end, return failure. 
*/ - if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { *stat = 0; return 0; } @@ -1878,8 +1889,8 @@ xfs_alloc_get_rec( xfs_alloc_rec_t *rec; /* record data */ rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); - *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); - *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); + *bno = be32_to_cpu(rec->ar_startblock); + *len = be32_to_cpu(rec->ar_blockcount); } *stat = 1; return 0; @@ -1918,14 +1929,14 @@ xfs_alloc_increment( * Increment the ptr at this level. If we're still in the block * then we're done. */ - if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { *stat = 1; return 0; } /* * If we just went off the right edge of the tree, return failure. */ - if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { *stat = 0; return 0; } @@ -1940,7 +1951,7 @@ xfs_alloc_increment( if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) return error; #endif - if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) break; /* * Read-ahead the right block, we're going to read it @@ -1960,7 +1971,7 @@ xfs_alloc_increment( lev > level; ) { xfs_agblock_t agbno; /* block number of btree block */ - agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno, agbno, 0, &bp, XFS_ALLOC_BTREE_REF))) @@ -1995,8 +2006,8 @@ xfs_alloc_insert( level = 0; nbno = NULLAGBLOCK; - INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); - INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); + nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); + nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); ncur = (xfs_btree_cur_t *)0; pcur = cur; /* @@ -2117,8 +2128,8 @@ xfs_alloc_update( /* * Fill in the new contents and log them. */ - INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); - INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); + rp->ar_startblock = cpu_to_be32(bno); + rp->ar_blockcount = cpu_to_be32(len); xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); } /* @@ -2127,15 +2138,15 @@ xfs_alloc_update( * extent in the a.g., which we cache in the a.g. freelist header. */ if (cur->bc_btnum == XFS_BTNUM_CNT && - INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && - ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && + ptr == be16_to_cpu(block->bb_numrecs)) { xfs_agf_t *agf; /* a.g. 
freespace header */ xfs_agnumber_t seqno; agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + seqno = be32_to_cpu(agf->agf_seqno); cur->bc_mp->m_perag[seqno].pagf_longest = len; - INT_SET(agf->agf_longest, ARCH_CONVERT, len); + agf->agf_longest = cpu_to_be32(len); xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } @@ -2145,8 +2156,8 @@ xfs_alloc_update( if (ptr == 1) { xfs_alloc_key_t key; /* key containing [bno, len] */ - INT_SET(key.ar_startblock, ARCH_CONVERT, bno); - INT_SET(key.ar_blockcount, ARCH_CONVERT, len); + key.ar_startblock = cpu_to_be32(bno); + key.ar_blockcount = cpu_to_be32(len); if ((error = xfs_alloc_updkey(cur, &key, 1))) return error; } diff --git a/libxfs/xfs_attr_leaf.c b/libxfs/xfs_attr_leaf.c index 2decdb65e..11d3778d1 100644 --- a/libxfs/xfs_attr_leaf.c +++ b/libxfs/xfs_attr_leaf.c @@ -242,7 +242,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) * Fix up the start offset of the attribute fork */ totsize -= size; - if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname) { + if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname && + !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { /* * Last attribute now removed, revert to original * inode format making all literal area available @@ -260,7 +261,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); ASSERT(dp->i_d.di_forkoff); - ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname); + ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname || + (mp->m_flags & XFS_MOUNT_COMPAT_ATTR)); dp->i_afp->if_ext_max = XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); dp->i_df.if_ext_max = @@ -430,7 +432,8 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) + name_loc->namelen + INT_GET(name_loc->valuelen, ARCH_CONVERT); } - if (bytes == sizeof(struct xfs_attr_sf_hdr)) + if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) && + (bytes == sizeof(struct xfs_attr_sf_hdr))) return(-1); return(xfs_attr_shortform_bytesfit(dp, bytes)); } @@ -468,6 +471,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) goto out; if (forkoff == -1) { + ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR)); + /* * Last attribute was removed, revert to original * inode format making all literal area available diff --git a/libxfs/xfs_bmap.c b/libxfs/xfs_bmap.c index 4248f9ef0..f493f29fd 100644 --- a/libxfs/xfs_bmap.c +++ b/libxfs/xfs_bmap.c @@ -2422,8 +2422,8 @@ xfs_bmap_btree_to_extents( ASSERT(ifp->if_flags & XFS_IFEXTENTS); ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); rblock = ifp->if_broot; - ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1); - ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1); + ASSERT(be16_to_cpu(rblock->bb_level) == 1); + ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); mp = ip->i_mount; pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); @@ -2866,11 +2866,11 @@ xfs_bmap_extents_to_btree( * Fill in the root. 
*/ block = ifp->if_broot; - INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); - INT_SET(block->bb_level, ARCH_CONVERT, 1); - INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); - INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); - INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); + block->bb_level = cpu_to_be16(1); + block->bb_numrecs = cpu_to_be16(1); + block->bb_leftsib = cpu_to_be64(NULLDFSBNO); + block->bb_rightsib = cpu_to_be64(NULLDFSBNO); /* * Need a cursor. Can't allocate until bb_level is filled in. */ @@ -2923,10 +2923,10 @@ xfs_bmap_extents_to_btree( * Fill in the child block. */ ablock = XFS_BUF_TO_BMBT_BLOCK(abp); - INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); ablock->bb_level = 0; - INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); - INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO); + ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { @@ -2936,8 +2936,8 @@ xfs_bmap_extents_to_btree( arp++; cnt++; } } - INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt); - ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork)); + ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); + ablock->bb_numrecs = cpu_to_be16(cnt); /* * Fill in the root key and pointer. */ @@ -2951,7 +2951,7 @@ xfs_bmap_extents_to_btree( * the root is at the right level. */ xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); - xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); ASSERT(*curp == NULL); *curp = cur; *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); @@ -3761,8 +3761,8 @@ xfs_bmap_read_extents( /* * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. */ - ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); - level = INT_GET(block->bb_level, ARCH_CONVERT); + level = be16_to_cpu(block->bb_level); + ASSERT(level > 0); pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); @@ -3805,7 +3805,7 @@ xfs_bmap_read_extents( xfs_extnum_t num_recs; - num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + num_recs = be16_to_cpu(block->bb_numrecs); if (unlikely(i + num_recs > room)) { ASSERT(i + num_recs <= room); xfs_fs_cmn_err(CE_WARN, ip->i_mount, @@ -3822,7 +3822,7 @@ xfs_bmap_read_extents( /* * Read-ahead the next leaf block, if any. 
*/ - nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + nextbno = be64_to_cpu(block->bb_rightsib); if (nextbno != NULLFSBLOCK) xfs_btree_reada_bufl(mp, nextbno, 1); /* @@ -4008,7 +4008,7 @@ xfs_bmapi( } if (wr && *firstblock == NULLFSBLOCK) { if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) - minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; else minleft = 1; } else @@ -4127,19 +4127,18 @@ xfs_bmapi( error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, -(indlen), rsvd); - if (error && rt) { - xfs_mod_incore_sb(ip->i_mount, + if (error && rt) + xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, rsvd); - } else if (error) { - xfs_mod_incore_sb(ip->i_mount, + else if (error) + xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, alen, rsvd); - } } if (error) { - if (XFS_IS_QUOTA_ON(ip->i_mount)) + if (XFS_IS_QUOTA_ON(mp)) /* unreserve the blocks now */ (void) XFS_TRANS_UNRESERVE_QUOTA_NBLKS( diff --git a/libxfs/xfs_bmap_btree.c b/libxfs/xfs_bmap_btree.c index 202786bf6..ddf3a2fed 100644 --- a/libxfs/xfs_bmap_btree.c +++ b/libxfs/xfs_bmap_btree.c @@ -72,7 +72,7 @@ xfs_bmbt_delrec( return 0; } block = xfs_bmbt_get_block(cur, level, &bp); - numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + numrecs = be16_to_cpu(block->bb_numrecs); #ifdef DEBUG if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -117,7 +117,7 @@ xfs_bmbt_delrec( } } numrecs--; - INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + block->bb_numrecs = cpu_to_be16(numrecs); xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); /* * We're at the root level. @@ -153,8 +153,8 @@ xfs_bmbt_delrec( *stat = 1; return 0; } - rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); - lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + rbno = be64_to_cpu(block->bb_rightsib); + lbno = be64_to_cpu(block->bb_leftsib); /* * One child of root, need to get a chance to copy its contents * into the root and delete it. 
Can't go up to next level, @@ -198,15 +198,15 @@ xfs_bmbt_delrec( goto error0; } #endif - bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); - if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + bno = be64_to_cpu(right->bb_leftsib); + if (be16_to_cpu(right->bb_numrecs) - 1 >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { if ((error = xfs_bmbt_lshift(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } if (i) { - ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(block->bb_numrecs) >= XFS_BMAP_BLOCK_IMINRECS(level, tcur)); xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); tcur = NULL; @@ -223,7 +223,7 @@ xfs_bmbt_delrec( return 0; } } - rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + rrecs = be16_to_cpu(right->bb_numrecs); if (lbno != NULLFSBLOCK) { i = xfs_btree_firstrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); @@ -254,15 +254,15 @@ xfs_bmbt_delrec( goto error0; } #endif - bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); - if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + bno = be64_to_cpu(left->bb_rightsib); + if (be16_to_cpu(left->bb_numrecs) - 1 >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { if ((error = xfs_bmbt_rshift(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } if (i) { - ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(block->bb_numrecs) >= XFS_BMAP_BLOCK_IMINRECS(level, tcur)); xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); tcur = NULL; @@ -273,14 +273,14 @@ xfs_bmbt_delrec( return 0; } } - lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + lrecs = be16_to_cpu(left->bb_numrecs); } xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); tcur = NULL; mp = cur->bc_mp; ASSERT(bno != NULLFSBLOCK); if (lbno != NULLFSBLOCK && - lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { rbno = bno; right = block; rbp = bp; @@ -295,7 +295,7 @@ xfs_bmbt_delrec( goto error0; } } else if (rbno != NULLFSBLOCK && - rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + rrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { lbno = bno; left = block; @@ -310,7 +310,7 @@ xfs_bmbt_delrec( XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } - lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + lrecs = be16_to_cpu(left->bb_numrecs); } else { if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -320,8 +320,8 @@ xfs_bmbt_delrec( *stat = 1; return 0; } - numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); - numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + numlrecs = be16_to_cpu(left->bb_numrecs); + numrrecs = be16_to_cpu(right->bb_numrecs); if (level > 0) { lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); @@ -345,12 +345,12 @@ xfs_bmbt_delrec( memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); } - INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs); - left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + be16_add(&left->bb_numrecs, numrrecs); + left->bb_rightsib = right->bb_rightsib; xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); - if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, - INT_GET(left->bb_rightsib, ARCH_CONVERT), + be64_to_cpu(left->bb_rightsib), 0, &rrbp, 
XFS_BMAP_BTREE_REF))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; @@ -360,7 +360,7 @@ xfs_bmbt_delrec( XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } - INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + rrblock->bb_leftsib = cpu_to_be64(lbno); xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); } xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, @@ -437,7 +437,7 @@ xfs_bmbt_insrec( } XFS_STATS_INC(xs_bmbt_insrec); block = xfs_bmbt_get_block(cur, level, &bp); - numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + numrecs = be16_to_cpu(block->bb_numrecs); #ifdef DEBUG if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -519,7 +519,7 @@ xfs_bmbt_insrec( } } } - numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + numrecs = be16_to_cpu(block->bb_numrecs); if (level > 0) { kp = XFS_BMAP_KEY_IADDR(block, 1, cur); pp = XFS_BMAP_PTR_IADDR(block, 1, cur); @@ -546,7 +546,7 @@ xfs_bmbt_insrec( kp[ptr - 1] = key; INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); numrecs++; - INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + block->bb_numrecs = cpu_to_be16(numrecs); xfs_bmbt_log_keys(cur, bp, ptr, numrecs); xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); } else { @@ -555,7 +555,7 @@ xfs_bmbt_insrec( (numrecs - ptr + 1) * sizeof(*rp)); rp[ptr - 1] = *recp; numrecs++; - INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + block->bb_numrecs = cpu_to_be16(numrecs); xfs_bmbt_log_recs(cur, bp, ptr, numrecs); } xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); @@ -825,7 +825,7 @@ xfs_bmbt_lookup( else krbase = XFS_BMAP_REC_IADDR(block, 1, cur); low = 1; - if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + if (!(high = be16_to_cpu(block->bb_numrecs))) { ASSERT(level == 0); cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; XFS_BMBT_TRACE_CURSOR(cur, EXIT); @@ -872,8 +872,8 @@ xfs_bmbt_lookup( * If ge search and we went off the end of the block, but it's * not the last block, we're in the wrong block. 
*/ - if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && - INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) && + be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) { cur->bc_ptrs[0] = keyno; if ((error = xfs_bmbt_increment(cur, 0, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -888,7 +888,7 @@ xfs_bmbt_lookup( else if (dir == XFS_LOOKUP_LE && diff > 0) keyno--; cur->bc_ptrs[0] = keyno; - if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; } else { @@ -945,7 +945,7 @@ xfs_bmbt_lshift( return error; } #endif - if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; @@ -956,7 +956,7 @@ xfs_bmbt_lshift( return 0; } mp = cur->bc_mp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0, &lbp, XFS_BMAP_BTREE_REF))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; @@ -966,12 +966,12 @@ xfs_bmbt_lshift( XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } - if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; } - lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + lrecs = be16_to_cpu(left->bb_numrecs) + 1; if (level > 0) { lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); @@ -993,7 +993,7 @@ xfs_bmbt_lshift( *lrp = *rrp; xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); } - INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs); + left->bb_numrecs = cpu_to_be16(lrecs); xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); #ifdef DEBUG if (level > 0) @@ -1001,8 +1001,8 @@ xfs_bmbt_lshift( else xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); #endif - rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; - INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs); + rrecs = be16_to_cpu(right->bb_numrecs) - 1; + right->bb_numrecs = cpu_to_be16(rrecs); xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); if (level > 0) { #ifdef DEBUG @@ -1079,18 +1079,18 @@ xfs_bmbt_rshift( return error; } #endif - if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; } - if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; } mp = cur->bc_mp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0, &rbp, XFS_BMAP_BTREE_REF))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; @@ -1100,26 +1100,26 @@ xfs_bmbt_rshift( XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } - if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; } if (level > 0) { - lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); - lpp = XFS_BMAP_PTR_IADDR(left, 
INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); + lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); #ifdef DEBUG - for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } } #endif - memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); - memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); + memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); #ifdef DEBUG if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -1128,21 +1128,21 @@ xfs_bmbt_rshift( #endif *rkp = *lkp; *rpp = *lpp; /* INT_: direct copy */ - xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); - xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); + xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); } else { - lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); rrp = XFS_BMAP_REC_IADDR(right, 1, cur); - memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); *rrp = *lrp; - xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rrp)); rkp = &key; } - INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + be16_add(&left->bb_numrecs, -1); xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); - INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + be16_add(&right->bb_numrecs, 1); #ifdef DEBUG if (level > 0) xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); @@ -1400,18 +1400,18 @@ xfs_bmdr_to_bmbt( xfs_bmbt_key_t *tkp; xfs_bmbt_ptr_t *tpp; - INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); - rblock->bb_level = dblock->bb_level; /* both in on-disk format */ - ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); - rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */ - INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); - INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); + rblock->bb_level = dblock->bb_level; + ASSERT(be16_to_cpu(rblock->bb_level) > 0); + rblock->bb_numrecs = dblock->bb_numrecs; + rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO); + rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO); dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); - dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ } @@ -1454,7 +1454,7 @@ xfs_bmbt_decrement( return 
error; } #endif - if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; @@ -1486,7 +1486,7 @@ xfs_bmbt_decrement( XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } - cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); } XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 1; @@ -1772,12 +1772,12 @@ xfs_bmbt_increment( return error; } #endif - if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 1; return 0; } - if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 0; return 0; @@ -1790,7 +1790,7 @@ xfs_bmbt_increment( return error; } #endif - if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) break; if (lev < cur->bc_nlevels - 1) xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); @@ -1980,20 +1980,6 @@ xfs_bmbt_lookup_ge( return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat); } -int /* error */ -xfs_bmbt_lookup_le( - xfs_btree_cur_t *cur, - xfs_fileoff_t off, - xfs_fsblock_t bno, - xfs_filblks_t len, - int *stat) /* success/failure */ -{ - cur->bc_rec.b.br_startoff = off; - cur->bc_rec.b.br_startblock = bno; - cur->bc_rec.b.br_blockcount = len; - return xfs_bmbt_lookup(cur, XFS_LOOKUP_LE, stat); -} - /* * Give the bmap btree a new root block. Copy the old broot contents * down into a real block and make the broot point to it. @@ -2066,23 +2052,23 @@ xfs_bmbt_newroot( bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); cblock = XFS_BUF_TO_BMBT_BLOCK(bp); *cblock = *block; - INT_MOD(block->bb_level, ARCH_CONVERT, +1); - INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + be16_add(&block->bb_level, 1); + block->bb_numrecs = cpu_to_be16(1); cur->bc_nlevels++; cur->bc_ptrs[level + 1] = 1; kp = XFS_BMAP_KEY_IADDR(block, 1, cur); ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); - memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); + memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp)); cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); #ifdef DEBUG - for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { + for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } } #endif - memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); + memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp)); #ifdef DEBUG if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, level))) { @@ -2091,7 +2077,7 @@ xfs_bmbt_newroot( } #endif INT_SET(*pp, ARCH_CONVERT, args.fsbno); - xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT), + xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs), cur->bc_private.b.whichfork); xfs_btree_setbuf(cur, level, bp); /* @@ -2099,8 +2085,8 @@ xfs_bmbt_newroot( * the root is at the right level. 
*/ xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); - xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); - xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); + xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); XFS_BMBT_TRACE_CURSOR(cur, EXIT); *logflags |= XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); @@ -2368,18 +2354,18 @@ xfs_bmbt_to_bmdr( xfs_bmbt_key_t *tkp; xfs_bmbt_ptr_t *tpp; - ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC); - ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); - ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); - ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); - dblock->bb_level = rblock->bb_level; /* both in on-disk format */ - dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */ + ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); + ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO); + ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO); + ASSERT(be16_to_cpu(rblock->bb_level) > 0); + dblock->bb_level = rblock->bb_level; + dblock->bb_numrecs = rblock->bb_numrecs; dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); - dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ } diff --git a/libxfs/xfs_btree.c b/libxfs/xfs_btree.c index 8ae7a5584..d6d126fcc 100644 --- a/libxfs/xfs_btree.c +++ b/libxfs/xfs_btree.c @@ -62,11 +62,14 @@ xfs_btree_maxrecs( switch (cur->bc_btnum) { case XFS_BTNUM_BNO: case XFS_BTNUM_CNT: - return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + return (int)XFS_ALLOC_BLOCK_MAXRECS( + be16_to_cpu(block->bb_h.bb_level), cur); case XFS_BTNUM_BMAP: - return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + return (int)XFS_BMAP_BLOCK_IMAXRECS( + be16_to_cpu(block->bb_h.bb_level), cur); case XFS_BTNUM_INO: - return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + return (int)XFS_INOBT_BLOCK_MAXRECS( + be16_to_cpu(block->bb_h.bb_level), cur); default: ASSERT(0); return 0; @@ -112,7 +115,7 @@ xfs_btree_check_key( k1 = ak1; k2 = ak2; - ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)); + ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)); break; } case XFS_BTNUM_CNT: { @@ -121,9 +124,9 @@ xfs_btree_check_key( k1 = ak1; k2 = ak2; - ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) || - (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) && - INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT))); + ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) || + (k1->ar_blockcount == k2->ar_blockcount && + be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock))); break; } case XFS_BTNUM_BMAP: { @@ -166,16 +169,16 @@ xfs_btree_check_lblock( mp = cur->bc_mp; lblock_ok = - INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && - INT_GET(block->bb_level, ARCH_CONVERT) 
== level && - INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && + be16_to_cpu(block->bb_level) == level && + be16_to_cpu(block->bb_numrecs) <= xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && block->bb_leftsib && - (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) && + (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && block->bb_rightsib && - (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT))); + (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib))); if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, XFS_RANDOM_BTREE_CHECK_LBLOCK))) { if (bp) @@ -223,8 +226,9 @@ xfs_btree_check_rec( r1 = ar1; r2 = ar2; - ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <= - INT_GET(r2->ar_startblock, ARCH_CONVERT)); + ASSERT(be32_to_cpu(r1->ar_startblock) + + be32_to_cpu(r1->ar_blockcount) <= + be32_to_cpu(r2->ar_startblock)); break; } case XFS_BTNUM_CNT: { @@ -233,9 +237,9 @@ xfs_btree_check_rec( r1 = ar1; r2 = ar2; - ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) || - (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) && - INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT))); + ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) || + (r1->ar_blockcount == r2->ar_blockcount && + be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock))); break; } case XFS_BTNUM_BMAP: { @@ -283,17 +287,17 @@ xfs_btree_check_sblock( agbp = cur->bc_private.a.agbp; agf = XFS_BUF_TO_AGF(agbp); - agflen = INT_GET(agf->agf_length, ARCH_CONVERT); + agflen = be32_to_cpu(agf->agf_length); sblock_ok = - INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && - INT_GET(block->bb_level, ARCH_CONVERT) == level && - INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && + be16_to_cpu(block->bb_level) == level && + be16_to_cpu(block->bb_numrecs) <= xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && - (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK || - INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) && + (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK || + be32_to_cpu(block->bb_leftsib) < agflen) && block->bb_leftsib && - (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK || - INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) && + (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK || + be32_to_cpu(block->bb_rightsib) < agflen) && block->bb_rightsib; if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK, @@ -324,7 +328,7 @@ xfs_btree_check_sptr( XFS_WANT_CORRUPTED_RETURN( level > 0 && ptr != NULLAGBLOCK && ptr != 0 && - ptr < INT_GET(agf->agf_length, ARCH_CONVERT)); + ptr < be32_to_cpu(agf->agf_length)); return 0; } @@ -563,15 +567,15 @@ xfs_btree_init_cursor( case XFS_BTNUM_BNO: case XFS_BTNUM_CNT: agf = XFS_BUF_TO_AGF(agbp); - nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT); + nlevels = be32_to_cpu(agf->agf_levels[btnum]); break; case XFS_BTNUM_BMAP: ifp = XFS_IFORK_PTR(ip, whichfork); - nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + nlevels = 
be16_to_cpu(ifp->if_broot->bb_level) + 1; break; case XFS_BTNUM_INO: agi = XFS_BUF_TO_AGI(agbp); - nlevels = INT_GET(agi->agi_level, ARCH_CONVERT); + nlevels = be32_to_cpu(agi->agi_level); break; default: ASSERT(0); @@ -635,9 +639,9 @@ xfs_btree_islastblock( block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) - return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO; + return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; else - return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK; + return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; } /* @@ -665,7 +669,7 @@ xfs_btree_lastrec( /* * Set the ptr value to numrecs, that's the last record/key. */ - cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT); + cur->bc_ptrs[level] = be16_to_cpu(block->bb_h.bb_numrecs); return 1; } @@ -797,38 +801,38 @@ xfs_btree_readahead_core( case XFS_BTNUM_BNO: case XFS_BTNUM_CNT: a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) { xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - INT_GET(a->bb_leftsib, ARCH_CONVERT), 1); + be32_to_cpu(a->bb_leftsib), 1); rval++; } - if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) { xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - INT_GET(a->bb_rightsib, ARCH_CONVERT), 1); + be32_to_cpu(a->bb_rightsib), 1); rval++; } break; case XFS_BTNUM_BMAP: b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) { - xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1); + if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1); rval++; } - if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { - xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1); + if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1); rval++; } break; case XFS_BTNUM_INO: i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, - INT_GET(i->bb_leftsib, ARCH_CONVERT), 1); + be32_to_cpu(i->bb_leftsib), 1); rval++; } - if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, - INT_GET(i->bb_rightsib, ARCH_CONVERT), 1); + be32_to_cpu(i->bb_rightsib), 1); rval++; } break; @@ -860,14 +864,14 @@ xfs_btree_setbuf( return; b = XFS_BUF_TO_BLOCK(bp); if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { - if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) + if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) + if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } else { 
- if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) + if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) + if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } } diff --git a/libxfs/xfs_ialloc.c b/libxfs/xfs_ialloc.c index 6208384d6..77e2d10a4 100644 --- a/libxfs/xfs_ialloc.c +++ b/libxfs/xfs_ialloc.c @@ -104,7 +104,6 @@ xfs_ialloc_ag_alloc( int blks_per_cluster; /* fs blocks per inode cluster */ xfs_btree_cur_t *cur; /* inode btree cursor */ xfs_daddr_t d; /* disk addr of buffer */ - xfs_agnumber_t agno; int error; xfs_buf_t *fbuf; /* new free inodes' buffer */ xfs_dinode_t *free; /* new free inode structure */ @@ -158,8 +157,8 @@ xfs_ialloc_ag_alloc( * Ideally they should be spaced out through the a.g. * For now, just allocate blocks up front. */ - args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); - args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + args.agbno = be32_to_cpu(agi->agi_root); + args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno), args.agbno); /* * Allocate a fixed-size extent of inodes. @@ -181,9 +180,9 @@ xfs_ialloc_ag_alloc( */ if (isaligned && args.fsbno == NULLFSBLOCK) { args.type = XFS_ALLOCTYPE_NEAR_BNO; - args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + args.agbno = be32_to_cpu(agi->agi_root); args.fsbno = XFS_AGB_TO_FSB(args.mp, - INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno); + be32_to_cpu(agi->agi_seqno), args.agbno); if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && args.mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) @@ -238,7 +237,7 @@ xfs_ialloc_ag_alloc( /* * Get the block. */ - d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno), args.agbno + (j * blks_per_cluster)); fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, args.mp->m_bsize * blks_per_cluster, @@ -258,17 +257,17 @@ xfs_ialloc_ag_alloc( } xfs_trans_inode_alloc_buf(tp, fbuf); } - INT_MOD(agi->agi_count, ARCH_CONVERT, newlen); - INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen); - agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + be32_add(&agi->agi_count, newlen); + be32_add(&agi->agi_freecount, newlen); down_read(&args.mp->m_peraglock); - args.mp->m_perag[agno].pagi_freecount += newlen; + args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen; up_read(&args.mp->m_peraglock); - INT_SET(agi->agi_newino, ARCH_CONVERT, newino); + agi->agi_newino = cpu_to_be32(newino); /* * Insert records describing the new inode chunk into the btree. */ - cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno, + cur = xfs_btree_init_cursor(args.mp, tp, agbp, + be32_to_cpu(agi->agi_seqno), XFS_BTNUM_INO, (xfs_inode_t *)0, 0); for (thisino = newino; thisino < newino + newlen; @@ -508,7 +507,7 @@ xfs_dialloc( return 0; } agi = XFS_BUF_TO_AGI(agbp); - ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); } else { /* * Continue where we left off before. 
In this case, we @@ -516,12 +515,12 @@ xfs_dialloc( */ agbp = *IO_agbp; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); - ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(be32_to_cpu(agi->agi_freecount) > 0); } mp = tp->t_mountp; agcount = mp->m_sb.sb_agcount; - agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + agno = be32_to_cpu(agi->agi_seqno); tagno = agno; pagno = XFS_INO_TO_AGNO(mp, parent); pagino = XFS_INO_TO_AGINO(mp, parent); @@ -569,7 +568,7 @@ xfs_dialloc( * can commit the current transaction and call * us again where we left off. */ - ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + ASSERT(be32_to_cpu(agi->agi_freecount) > 0); *alloc_done = B_TRUE; *IO_agbp = agbp; *inop = NULLFSINO; @@ -600,7 +599,7 @@ nextag: if (error) goto nextag; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); } /* * Here with an allocation group that has a free inode. @@ -609,14 +608,14 @@ nextag: */ agno = tagno; *IO_agbp = NULL; - cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno), XFS_BTNUM_INO, (xfs_inode_t *)0, 0); /* * If pagino is 0 (this is the root inode allocation) use newino. * This must work because we've just allocated some. */ if (!pagino) - pagino = INT_GET(agi->agi_newino, ARCH_CONVERT); + pagino = be32_to_cpu(agi->agi_newino); #ifdef DEBUG if (cur->bc_nlevels == 1) { int freecount = 0; @@ -634,7 +633,7 @@ nextag: goto error0; } while (i == 1); - ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || XFS_FORCED_SHUTDOWN(mp)); } #endif @@ -793,9 +792,9 @@ nextag: * In a different a.g. from the parent. * See if the most recently allocated block has any free. */ - else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) { + else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { if ((error = xfs_inobt_lookup_eq(cur, - INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i))) + be32_to_cpu(agi->agi_newino), 0, 0, &i))) goto error0; if (i == 1 && (error = xfs_inobt_get_rec(cur, &rec.ir_startino, @@ -842,7 +841,7 @@ nextag: if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) goto error0; - INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1); + be32_add(&agi->agi_freecount, -1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); down_read(&mp->m_peraglock); mp->m_perag[tagno].pagi_freecount--; @@ -862,7 +861,7 @@ nextag: if ((error = xfs_inobt_increment(cur, 0, &i))) goto error0; } while (i == 1); - ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || XFS_FORCED_SHUTDOWN(mp)); } #endif @@ -1075,7 +1074,7 @@ xfs_ialloc_log_agi( xfs_agi_t *agi; /* allocation group header */ agi = XFS_BUF_TO_AGI(bp); - ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); #endif /* * Compute byte offsets for the first and last fields. 
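
The xfs_dialloc hunks above follow the same pattern as the rest of this patch: on-disk AGI fields stay big-endian and are converted with be32_to_cpu() at the point of use, so every comparison and calculation happens in host endianness. A minimal user-space sketch of that access pattern follows; the struct layout, names, and magic value are invented for the example, and ntohl()/htonl() merely stand in for be32_to_cpu()/cpu_to_be32().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Hypothetical header, loosely shaped like an AG inode header. */
struct demo_agi_hdr {
	uint32_t	magicnum;	/* big-endian on disk */
	uint32_t	seqno;		/* big-endian on disk */
	uint32_t	freecount;	/* big-endian on disk */
};

#define DEMO_AGI_MAGIC	0x58414749	/* demo magic value only */

int main(void)
{
	struct demo_agi_hdr	agi;
	uint32_t		agno;
	uint32_t		freecount;

	/* Pretend this header arrived straight off disk, big-endian. */
	agi.magicnum = htonl(DEMO_AGI_MAGIC);
	agi.seqno = htonl(3);
	agi.freecount = htonl(64);

	/* Convert once at the boundary, then work in host endianness. */
	assert(ntohl(agi.magicnum) == DEMO_AGI_MAGIC);
	agno = ntohl(agi.seqno);
	freecount = ntohl(agi.freecount);

	printf("ag %u has %u free inodes\n", agno, freecount);
	return 0;
}
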
@@ -1117,9 +1116,8 @@ xfs_ialloc_read_agi( */ agi = XFS_BUF_TO_AGI(bp); agi_ok = - INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && - XFS_AGI_GOOD_VERSION( - INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, XFS_RANDOM_IALLOC_READ_AGI))) { XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, @@ -1129,16 +1127,15 @@ xfs_ialloc_read_agi( } pag = &mp->m_perag[agno]; if (!pag->pagi_init) { - pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT); + pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_init = 1; } else { /* * It's possible for these to be out of sync if * we are in the middle of a forced shutdown. */ - ASSERT(pag->pagi_freecount == - INT_GET(agi->agi_freecount, ARCH_CONVERT) - || XFS_FORCED_SHUTDOWN(mp)); + ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || + XFS_FORCED_SHUTDOWN(mp)); } #ifdef DEBUG diff --git a/libxfs/xfs_ialloc_btree.c b/libxfs/xfs_ialloc_btree.c index 152e08a9c..beaf5398e 100644 --- a/libxfs/xfs_ialloc_btree.c +++ b/libxfs/xfs_ialloc_btree.c @@ -51,6 +51,13 @@ xfs_inobt_insrec( int ptr; /* index in btree block for this rec */ xfs_inobt_rec_t *rp=NULL; /* pointer to btree records */ + /* + * GCC doesn't understand the (arguably complex) control flow in + * this function and complains about uninitialized structure fields + * without this. + */ + memset(&nrec, 0, sizeof(nrec)); + /* * If we made it to the root level, allocate a new root block * and we're done. @@ -78,7 +85,7 @@ xfs_inobt_insrec( */ bp = cur->bc_bufs[level]; block = XFS_BUF_TO_INOBT_BLOCK(bp); - numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + numrecs = be16_to_cpu(block->bb_numrecs); #ifdef DEBUG if ((error = xfs_btree_check_sblock(cur, block, level, bp))) return error; @@ -152,7 +159,7 @@ xfs_inobt_insrec( * At this point we know there's room for our new entry in the block * we're pointing at. */ - numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + numrecs = be16_to_cpu(block->bb_numrecs); if (level > 0) { /* * It's a non-leaf entry. Make a hole for the new data @@ -162,7 +169,7 @@ xfs_inobt_insrec( pp = XFS_INOBT_PTR_ADDR(block, 1, cur); #ifdef DEBUG for (i = numrecs; i >= ptr; i--) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) return error; } #endif @@ -178,9 +185,9 @@ xfs_inobt_insrec( return error; #endif kp[ptr - 1] = key; /* INT_: struct copy */ - INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + pp[ptr - 1] = cpu_to_be32(*bnop); numrecs++; - INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + block->bb_numrecs = cpu_to_be16(numrecs); xfs_inobt_log_keys(cur, bp, ptr, numrecs); xfs_inobt_log_ptrs(cur, bp, ptr, numrecs); } else { @@ -196,7 +203,7 @@ xfs_inobt_insrec( */ rp[ptr - 1] = *recp; /* INT_: struct copy */ numrecs++; - INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + block->bb_numrecs = cpu_to_be16(numrecs); xfs_inobt_log_recs(cur, bp, ptr, numrecs); } /* @@ -351,8 +358,8 @@ xfs_inobt_lookup( xfs_agi_t *agi; /* a.g. inode header */ agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); - agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); - agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + agno = be32_to_cpu(agi->agi_seqno); + agbno = be32_to_cpu(agi->agi_root); } /* * Iterate over each level in the btree, starting at the root. 
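
The comment added to xfs_inobt_insrec() above spells out why nrec is cleared with memset() before the insert logic runs: the compiler cannot prove that every field is assigned on the paths that actually use the record. A small sketch of that defensive pattern, with a made-up record type and fill logic:

#include <stdio.h>
#include <string.h>

struct demo_rec {
	int	startino;
	int	freecount;
};

/*
 * Fill *out only on some paths.  Zeroing it up front keeps
 * -Wmaybe-uninitialized quiet and leaves a well-defined value
 * even when no branch stores into it, which is the same idea
 * as the memset() of nrec above.
 */
static int demo_pick(int want, struct demo_rec *out)
{
	memset(out, 0, sizeof(*out));

	if (want > 0) {
		out->startino = want * 64;
		out->freecount = 64;
		return 1;
	}
	return 0;		/* *out is still all zeroes here */
}

int main(void)
{
	struct demo_rec	r;

	if (demo_pick(2, &r))
		printf("startino %d, freecount %d\n", r.startino, r.freecount);
	return 0;
}
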
@@ -419,7 +426,7 @@ xfs_inobt_lookup( * Set low and high entry numbers, 1-based. */ low = 1; - if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + if (!(high = be16_to_cpu(block->bb_numrecs))) { /* * If the block is empty, the tree must * be an empty leaf. @@ -486,7 +493,7 @@ xfs_inobt_lookup( */ if (diff > 0 && --keyno < 1) keyno = 1; - agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); + agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, keyno, cur)); #ifdef DEBUG if ((error = xfs_btree_check_sptr(cur, agbno, level))) return error; @@ -505,8 +512,8 @@ xfs_inobt_lookup( * not the last block, we're in the wrong block. */ if (dir == XFS_LOOKUP_GE && - keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && - INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + keyno > be16_to_cpu(block->bb_numrecs) && + be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { int i; cur->bc_ptrs[0] = keyno; @@ -523,7 +530,7 @@ xfs_inobt_lookup( /* * Return if we succeeded or not. */ - if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) *stat = 0; else *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); @@ -569,7 +576,7 @@ xfs_inobt_lshift( /* * If we've got no left sibling then we can't shift an entry left. */ - if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { *stat = 0; return 0; } @@ -585,8 +592,8 @@ xfs_inobt_lshift( * Set up the left neighbor as "left". */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, - XFS_INO_BTREE_REF))) + cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib), + 0, &lbp, XFS_INO_BTREE_REF))) return error; left = XFS_BUF_TO_INOBT_BLOCK(lbp); if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) @@ -594,11 +601,11 @@ xfs_inobt_lshift( /* * If it's full, it can't take another entry. */ - if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + if (be16_to_cpu(left->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { *stat = 0; return 0; } - nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + nrec = be16_to_cpu(left->bb_numrecs) + 1; /* * If non-leaf, copy a key and a ptr to the left block. */ @@ -610,7 +617,7 @@ xfs_inobt_lshift( lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); #ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) return error; #endif *lpp = *rpp; /* INT_: no-change copy */ @@ -628,7 +635,7 @@ xfs_inobt_lshift( /* * Bump and log left's numrecs, decrement and log right's numrecs. */ - INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + be16_add(&left->bb_numrecs, 1); xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); #ifdef DEBUG if (level > 0) @@ -636,26 +643,26 @@ xfs_inobt_lshift( else xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); #endif - INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + be16_add(&right->bb_numrecs, -1); xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); /* * Slide the contents of right down one entry. 
*/ if (level > 0) { #ifdef DEBUG - for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { - if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { + if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), level))) return error; } #endif - memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); - memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); - xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); - xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); + memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); + xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); + xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); } else { - memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); - xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); + xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ rkp = &key; } @@ -707,7 +714,7 @@ xfs_inobt_newroot( args.tp = cur->bc_tp; args.mp = cur->bc_mp; args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, - INT_GET(agi->agi_root, ARCH_CONVERT)); + be32_to_cpu(agi->agi_root)); args.mod = args.minleft = args.alignment = args.total = args.wasdel = args.isfl = args.userdata = args.minalignslop = 0; args.minlen = args.maxlen = args.prod = 1; @@ -727,8 +734,8 @@ xfs_inobt_newroot( /* * Set the root data in the a.g. inode structure. */ - INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno); - INT_MOD(agi->agi_level, ARCH_CONVERT, 1); + agi->agi_root = cpu_to_be32(args.agbno); + be32_add(&agi->agi_level, 1); xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); /* @@ -743,14 +750,14 @@ xfs_inobt_newroot( if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) return error; #endif - if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + if (be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { /* * Our block is left, pick up the right block. */ lbp = bp; lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); left = block; - rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + rbno = be32_to_cpu(left->bb_rightsib); if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, rbno, 0, &rbp, XFS_INO_BTREE_REF))) return error; @@ -767,7 +774,7 @@ xfs_inobt_newroot( rbp = bp; rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); right = block; - lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + lbno = be32_to_cpu(right->bb_leftsib); if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, lbno, 0, &lbp, XFS_INO_BTREE_REF))) return error; @@ -781,18 +788,18 @@ xfs_inobt_newroot( /* * Fill in the new block's btree header and log it. 
 	 */
-	INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels);
-	INT_SET(new->bb_numrecs, ARCH_CONVERT, 2);
-	INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-	INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	new->bb_level = cpu_to_be16(cur->bc_nlevels);
+	new->bb_numrecs = cpu_to_be16(2);
+	new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+	new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 	xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS);
 	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
 	/*
 	 * Fill in the key data in the new root.
 	 */
 	kp = XFS_INOBT_KEY_ADDR(new, 1, cur);
-	if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) {
+	if (be16_to_cpu(left->bb_level) > 0) {
 		kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */
 		kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */
 	} else {
@@ -806,8 +813,8 @@ xfs_inobt_newroot(
 	 * Fill in the pointer data in the new root.
 	 */
 	pp = XFS_INOBT_PTR_ADDR(new, 1, cur);
-	INT_SET(pp[0], ARCH_CONVERT, lbno);
-	INT_SET(pp[1], ARCH_CONVERT, rbno);
+	pp[0] = cpu_to_be32(lbno);
+	pp[1] = cpu_to_be32(rbno);
 	xfs_inobt_log_ptrs(cur, nbp, 1, 2);
 	/*
 	 * Fix up the cursor.
@@ -856,7 +863,7 @@ xfs_inobt_rshift(
 	/*
 	 * If we've got no right sibling then we can't shift an entry right.
 	 */
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -864,7 +871,7 @@ xfs_inobt_rshift(
 	 * If the cursor entry is the one that would be moved, don't
 	 * do it... it's too complicated.
 	 */
-	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+	if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
 		*stat = 0;
 		return 0;
 	}
@@ -872,8 +879,8 @@ xfs_inobt_rshift(
 	 * Set up the right neighbor as "right".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp,
-			XFS_INO_BTREE_REF)))
+			cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib),
+			0, &rbp, XFS_INO_BTREE_REF)))
 		return error;
 	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
 	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
@@ -881,7 +888,7 @@ xfs_inobt_rshift(
 	/*
 	 * If it's full, it can't take another entry.
 	 */
-	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(right->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
 		*stat = 0;
 		return 0;
 	}
@@ -890,41 +897,41 @@ xfs_inobt_rshift(
 	 * copy the last left block entry to the hole.
 	 */
 	if (level > 0) {
-		lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
-		lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lkp = XFS_INOBT_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
+		lpp = XFS_INOBT_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+		for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
-		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
 #ifdef DEBUG
-		if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
+		if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
 			return error;
 #endif
 		*rkp = *lkp; /* INT_: no change copy */
 		*rpp = *lpp; /* INT_: no change copy */
-		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
-		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
+		xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 	} else {
-		lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lrp = XFS_INOBT_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
-		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
 		*rrp = *lrp;
-		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 		key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
 		rkp = &key;
 	}
 	/*
 	 * Decrement and log left's numrecs, bump and log right's numrecs.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&left->bb_numrecs, -1);
 	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&right->bb_numrecs, 1);
 #ifdef DEBUG
 	if (level > 0)
 		xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
@@ -1016,17 +1023,17 @@ xfs_inobt_split(
 	/*
 	 * Fill in the btree header for the new block.
 	 */
-	INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	right->bb_level = left->bb_level; /* INT_: direct copy */
-	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	right->bb_level = left->bb_level;
+	right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
 	/*
 	 * Make sure that if there's an odd number of entries now, that
 	 * each new block will have the same number of entries.
 	 */
-	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
-	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
-		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
-	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	if ((be16_to_cpu(left->bb_numrecs) & 1) &&
+	    cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
+		be16_add(&right->bb_numrecs, 1);
+	i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
 	/*
 	 * For non-leaf blocks, copy keys and addresses over to the new block.
 	 */
@@ -1036,15 +1043,15 @@ xfs_inobt_split(
 		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
 				return error;
 		}
 #endif
-		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
-		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		*keyp = *rkp;
 	}
 	/*
@@ -1053,36 +1060,36 @@ xfs_inobt_split(
 	else {
 		lrp = XFS_INOBT_REC_ADDR(left, i, cur);
 		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
-		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */
 	}
 	/*
 	 * Find the left block number by looking in the buffer.
 	 * Adjust numrecs, sibling pointers.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
-	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
-	INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno);
-	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+	right->bb_rightsib = left->bb_rightsib;
+	left->bb_rightsib = cpu_to_be32(args.agbno);
+	right->bb_leftsib = cpu_to_be32(lbno);
 	xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS);
 	xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
 	/*
 	 * If there's a block to the new block's right, make that block
 	 * point back to right instead of to left.
 	 */
-	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
 		xfs_inobt_block_t	*rrblock;	/* rr btree block */
 		xfs_buf_t		*rrbp;		/* buffer for rrblock */
 
 		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
-				INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
+				be32_to_cpu(right->bb_rightsib), 0, &rrbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
 		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
 			return error;
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno);
+		rrblock->bb_leftsib = cpu_to_be32(args.agbno);
 		xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB);
 	}
 	/*
@@ -1090,9 +1097,9 @@ xfs_inobt_split(
 	 * If it's just pointing past the last entry in left, then we'll
 	 * insert there, so don't change anything in that case.
 	 */
-	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+	if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
 		xfs_btree_setbuf(cur, level, rbp);
-		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * If there are more levels, we'll need another cursor which refers
@@ -1190,7 +1197,7 @@ xfs_inobt_decrement(
 	/*
 	 * If we just went off the left edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1219,7 +1226,7 @@ xfs_inobt_decrement(
 		xfs_agblock_t	agbno;	/* block number of btree block */
 		xfs_buf_t	*bp;	/* buffer containing btree block */
 
-		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.i.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
@@ -1229,7 +1236,7 @@ xfs_inobt_decrement(
 		block = XFS_BUF_TO_INOBT_BLOCK(bp);
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
-		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
 	}
 	*stat = 1;
 	return 0;
@@ -1264,7 +1271,7 @@ xfs_inobt_get_rec(
 	/*
 	 * Off the right end or left end, return failure.
 	 */
-	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
 		*stat = 0;
 		return 0;
 	}
@@ -1312,14 +1319,14 @@ xfs_inobt_increment(
 	 * Increment the ptr at this level. If we're still in the block
 	 * then we're done.
 	 */
-	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
 		*stat = 1;
 		return 0;
 	}
 	/*
 	 * If we just went off the right edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1334,7 +1341,7 @@ xfs_inobt_increment(
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
 #endif
-		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
 			break;
 		/*
 		 * Read-ahead the right block, we're going to read it
@@ -1354,7 +1361,7 @@ xfs_inobt_increment(
 	     lev > level; ) {
 		xfs_agblock_t	agbno;	/* block number of btree block */
 
-		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.i.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
diff --git a/mkfs/xfs_mkfs.c b/mkfs/xfs_mkfs.c
index 0917dfc02..5bfad6a99 100644
--- a/mkfs/xfs_mkfs.c
+++ b/mkfs/xfs_mkfs.c
@@ -2014,13 +2014,12 @@ an AG size that is one stripe unit smaller, for example %llu.\n"),
 		sbp->sb_logsectlog = 0;
 		sbp->sb_logsectsize = 0;
 	}
-	sbp->sb_versionnum =
-		XFS_SB_VERSION_MKFS(iaflag, dsunit != 0, extent_flagging,
+	sbp->sb_features2 = XFS_SB_VERSION2_MKFS(0, attrversion == 2, 0);
+	sbp->sb_versionnum = XFS_SB_VERSION_MKFS(
+			iaflag, dsunit != 0, extent_flagging,
 			dirversion == 2, logversion == 2, attrversion == 1,
 			(sectorsize != BBSIZE || lsectorsize != BBSIZE),
-			(0/*mmr*/|| 0/*lazy_sb_counters*/ || attrversion == 2));
-	sbp->sb_features2 = XFS_SB_VERSION2_MKFS(0/*mmr*/,0/*lazy_sb_counters*/,
-			attrversion == 2);
+			sbp->sb_features2 != 0);
 
 	/*
 	 * Zero out the beginning of the device, to obliterate any old
diff --git a/mkfs/xfs_mkfs.h b/mkfs/xfs_mkfs.h
index 5782add63..1ab85fdf2 100644
--- a/mkfs/xfs_mkfs.h
+++ b/mkfs/xfs_mkfs.h
@@ -18,6 +18,25 @@
 #ifndef __XFS_MKFS_H__
 #define __XFS_MKFS_H__
 
+#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dir2,log2,attr1,sflag,more) (\
+	((ia)||(dia)||(extflag)||(dir2)||(log2)||(attr1)||(sflag)||(more)) ? \
+	( XFS_SB_VERSION_4 | \
+	 ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
+	 ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
+	 ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
+	 ((dir2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
+	 ((log2) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
+	 ((attr1) ? XFS_SB_VERSION_ATTRBIT : 0) | \
+	 ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
+	 ((more) ? XFS_SB_VERSION_MOREBITSBIT : 0) | \
+	 0 ) : XFS_SB_VERSION_1 )
+
+#define XFS_SB_VERSION2_MKFS(lazycount, attr2, parent) (\
+	((lazycount) ? XFS_SB_VERSION2_LAZYSBCOUNTBIT : 0) | \
+	((attr2) ? XFS_SB_VERSION2_ATTR2BIT : 0) | \
+	((parent) ? XFS_SB_VERSION2_PARENTBIT : 0) | \
+	0 )
+
 #define XFS_DFL_BLOCKSIZE_LOG	12	/* 4096 byte blocks */
 #define XFS_DINODE_DFL_LOG	8	/* 256 byte inodes */
 #define XFS_MIN_DATA_BLOCKS	100
@@ -30,7 +49,7 @@
 #define XFS_MIN_LOG_FACTOR	3		/* min log size factor */
 #define XFS_DFL_LOG_FACTOR	16		/* default log size, factor */
 						/* with max trans reservation */
-#define XFS_MAX_INODE_SIG_BITS	32		/* most significant bits in an 
+#define XFS_MAX_INODE_SIG_BITS	32		/* most significant bits in an
 						 * inode number that we'll
 						 * accept w/o warnings */
 
@@ -54,7 +73,7 @@
 extern char *setup_proto (char *fname);
 extern void parse_proto (xfs_mount_t *mp, struct fsxattr *fsx, char **pp);
 extern void res_failed (int err);
 
-/* maxtrres.c */ 
+/* maxtrres.c */
 extern int max_trans_res (int dirversion, int sectorlog, int blocklog,
 			int inodelog, int dirblocklog);
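
The conversions throughout the xfs_ialloc_btree.c hunks above all follow one disk-endianness pattern: on-disk btree header fields such as bb_numrecs and bb_rightsib are stored big-endian, so every read is decoded with be16_to_cpu()/be32_to_cpu(), every store is re-encoded with cpu_to_be16()/cpu_to_be32(), and in-place adjustments go through the be*_add() helpers instead of INT_MOD(). The sketch below is illustrative only and is not part of the patch; the demo_* names and the htons()/ntohl()-based stand-ins are hypothetical, chosen only so the decode/adjust/re-encode pattern compiles as plain userspace C.

/*
 * Illustrative userspace sketch, not part of this patch.  The demo_*
 * helpers are stand-ins for the kernel byte-order routines; only the
 * decode/adjust/re-encode pattern matters.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define DEMO_NULLAGBLOCK	((uint32_t)-1)	/* "no sibling" marker, like NULLAGBLOCK */

struct demo_block_hdr {			/* simplified short-form btree block header */
	uint16_t	bb_numrecs;	/* big-endian on disk */
	uint32_t	bb_rightsib;	/* big-endian on disk */
};

static uint16_t demo_be16_to_cpu(uint16_t v) { return ntohs(v); }
static uint16_t demo_cpu_to_be16(uint16_t v) { return htons(v); }
static uint32_t demo_be32_to_cpu(uint32_t v) { return ntohl(v); }
static uint32_t demo_cpu_to_be32(uint32_t v) { return htonl(v); }

/* Same shape as the be16_add() calls that replace INT_MOD(..., n) above. */
static void demo_be16_add(uint16_t *a, int16_t b)
{
	*a = demo_cpu_to_be16(demo_be16_to_cpu(*a) + b);
}

int main(void)
{
	struct demo_block_hdr b = {
		.bb_numrecs  = demo_cpu_to_be16(3),
		.bb_rightsib = demo_cpu_to_be32(DEMO_NULLAGBLOCK),
	};

	demo_be16_add(&b.bb_numrecs, 1);	/* INT_MOD(..., +1) equivalent */
	printf("numrecs = %u\n", (unsigned)demo_be16_to_cpu(b.bb_numrecs));

	if (demo_be32_to_cpu(b.bb_rightsib) == DEMO_NULLAGBLOCK)
		printf("no right sibling\n");
	return 0;
}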
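
The xfs_mkfs.c hunk reorders the superblock version setup so sb_features2 is computed first and its non-zero-ness supplies the MOREBITS argument to XFS_SB_VERSION_MKFS, i.e. XFS_SB_VERSION_MOREBITSBIT is set only when secondary feature bits are actually in use. The following is a hypothetical standalone sketch of that coupling: the two _MKFS macros are the ones added to mkfs/xfs_mkfs.h above, but the XFS_SB_VERSION* bit values and the flag settings in main() are placeholders, not the real xfs_sb.h definitions or mkfs option handling.

/* Hypothetical sketch; placeholder bit values, not the real xfs_sb.h ones. */
#include <stdio.h>

#define XFS_SB_VERSION_1		1	/* placeholder */
#define XFS_SB_VERSION_4		4	/* placeholder */
#define XFS_SB_VERSION_ALIGNBIT		0x0010	/* placeholder bit values from here down */
#define XFS_SB_VERSION_DALIGNBIT	0x0020
#define XFS_SB_VERSION_EXTFLGBIT	0x0040
#define XFS_SB_VERSION_DIRV2BIT		0x0080
#define XFS_SB_VERSION_LOGV2BIT		0x0100
#define XFS_SB_VERSION_ATTRBIT		0x0200
#define XFS_SB_VERSION_SECTORBIT	0x0400
#define XFS_SB_VERSION_MOREBITSBIT	0x8000
#define XFS_SB_VERSION2_LAZYSBCOUNTBIT	0x0002
#define XFS_SB_VERSION2_ATTR2BIT	0x0008
#define XFS_SB_VERSION2_PARENTBIT	0x0010

/* Same shape as the macros added to mkfs/xfs_mkfs.h in this patch. */
#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dir2,log2,attr1,sflag,more) (\
	((ia)||(dia)||(extflag)||(dir2)||(log2)||(attr1)||(sflag)||(more)) ? \
	( XFS_SB_VERSION_4 | \
	 ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
	 ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
	 ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
	 ((dir2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
	 ((log2) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
	 ((attr1) ? XFS_SB_VERSION_ATTRBIT : 0) | \
	 ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
	 ((more) ? XFS_SB_VERSION_MOREBITSBIT : 0) | \
	 0 ) : XFS_SB_VERSION_1 )

#define XFS_SB_VERSION2_MKFS(lazycount, attr2, parent) (\
	((lazycount) ? XFS_SB_VERSION2_LAZYSBCOUNTBIT : 0) | \
	((attr2) ? XFS_SB_VERSION2_ATTR2BIT : 0) | \
	((parent) ? XFS_SB_VERSION2_PARENTBIT : 0) | \
	0 )

int main(void)
{
	int attrversion = 2;			/* hypothetical: attr2 requested */
	int dirversion = 2, logversion = 2, iaflag = 1;
	unsigned int sb_features2, sb_versionnum;

	/* features2 first; its non-zero-ness becomes the MOREBITS flag */
	sb_features2 = XFS_SB_VERSION2_MKFS(0, attrversion == 2, 0);
	sb_versionnum = XFS_SB_VERSION_MKFS(
			iaflag, 0 /* dsunit */, 0 /* extent_flagging */,
			dirversion == 2, logversion == 2, attrversion == 1,
			0 /* non-default sector size */, sb_features2 != 0);

	printf("versionnum 0x%x features2 0x%x\n", sb_versionnum, sb_features2);
	return 0;
}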