/*
- * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <xfs.h>
* Routines to implement directories as Btrees of hashed names.
*/
+static int xfs_error_level;
/*========================================================================
* Routines used for growing the Btree.
return(error);
ASSERT(bp != NULL);
node = bp->data;
- INT_ZERO(node->hdr.info.forw, ARCH_CONVERT);
- INT_ZERO(node->hdr.info.back, ARCH_CONVERT);
+ node->hdr.info.forw = 0;
+ node->hdr.info.back = 0;
INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC);
- INT_ZERO(node->hdr.info.pad, ARCH_CONVERT);
- INT_ZERO(node->hdr.count, ARCH_CONVERT);
+ node->hdr.info.pad = 0;
+ node->hdr.count = 0;
INT_SET(node->hdr.level, ARCH_CONVERT, level);
xfs_da_log_buf(tp, bp,
*/
switch (oldblk->magic) {
case XFS_ATTR_LEAF_MAGIC:
-#ifndef __KERNEL__
- return(ENOTTY);
-#else
error = xfs_attr_leaf_split(state, oldblk, newblk);
if ((error != 0) && (error != ENOSPC)) {
return(error); /* GROT: attr is inconsistent */
return(error); /* GROT: attr inconsistent */
addblk = newblk;
break;
-#endif
case XFS_DIR_LEAF_MAGIC:
ASSERT(XFS_DIR_IS_V1(state->mp));
error = xfs_dir_leaf_split(state, oldblk, newblk);
*/
node = oldblk->bp->data;
- if (!INT_ISZERO(node->hdr.info.forw, ARCH_CONVERT)) {
+ if (node->hdr.info.forw) {
if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) {
bp = addblk->bp;
} else {
size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] -
(char *)leaf);
}
- bcopy(oldroot, node, size);
+ memcpy(node, oldroot, size);
xfs_da_log_buf(tp, bp, 0, size - 1);
xfs_da_buf_done(blk1->bp);
blk1->bp = bp;
INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval);
INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno);
INT_SET(node->hdr.count, ARCH_CONVERT, 2);
- if (XFS_DIR_IS_V2(mp)) {
+
+#ifdef DEBUG
+ if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) {
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
blk1->blkno < mp->m_dirfreeblk);
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
blk2->blkno < mp->m_dirfreeblk);
}
+#endif
+
/* Header is already logged by xfs_da_node_create */
xfs_da_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, node->btree,
/*
* With V2 the extra block is data or freespace.
*/
- useextra = state->extravalid && XFS_DIR_IS_V1(state->mp);
+ useextra = state->extravalid && (XFS_DIR_IS_V1(state->mp) ||
+ state->args->whichfork == XFS_ATTR_FORK);
newcount = 1 + useextra;
/*
* Do we have to split the node?
*/
- if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > XFS_DA_NODE_ENTRIES(state->mp)) {
+ if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) {
/*
* Allocate a new node, add to the doubly linked chain of
* nodes, then move some of our excess entries into it.
error = xfs_da_grow_inode(state->args, &blkno);
if (error)
return(error); /* GROT: dir is inconsistent */
-
+
error = xfs_da_node_create(state->args, blkno, treelevel,
&newblk->bp, state->args->whichfork);
if (error)
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[0];
btree_d = &node2->btree[count];
- ovbcopy(btree_s, btree_d, tmp);
+ memmove(btree_d, btree_s, tmp);
}
/*
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count];
btree_d = &node2->btree[0];
- bcopy(btree_s, btree_d, tmp);
+ memcpy(btree_d, btree_s, tmp);
INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count));
} else {
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[0];
btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)];
- bcopy(btree_s, btree_d, tmp);
+ memcpy(btree_d, btree_s, tmp);
INT_MOD(node1->hdr.count, ARCH_CONVERT, count);
xfs_da_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[count];
btree_d = &node2->btree[0];
- ovbcopy(btree_s, btree_d, tmp);
+ memmove(btree_d, btree_s, tmp);
INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count));
}
btree = &node->btree[ oldblk->index ];
if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) {
tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree);
- ovbcopy(btree, btree + 1, tmp);
+ memmove(btree + 1, btree, tmp);
}
INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval);
INT_SET(btree->before, ARCH_CONVERT, newblk->blkno);
*/
switch (drop_blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
-#ifndef __KERNEL__
- error = ENOTTY;
-#else
error = xfs_attr_leaf_toosmall(state, &action);
-#endif
if (error)
return(error);
if (action == 0)
return(0);
-#ifdef __KERNEL__
xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
-#endif
break;
case XFS_DIR_LEAF_MAGIC:
ASSERT(XFS_DIR_IS_V1(state->mp));
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
oldroot = root_blk->bp->data;
ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
- ASSERT(INT_ISZERO(oldroot->hdr.info.forw, ARCH_CONVERT));
- ASSERT(INT_ISZERO(oldroot->hdr.info.back, ARCH_CONVERT));
+ ASSERT(!oldroot->hdr.info.forw);
+ ASSERT(!oldroot->hdr.info.back);
/*
* If the root has more than one child, then don't do anything.
} else {
ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
}
- ASSERT(INT_GET(blkinfo->forw, ARCH_CONVERT) == 0);
- ASSERT(INT_GET(blkinfo->back, ARCH_CONVERT) == 0);
- bcopy(bp->data, root_blk->bp->data, state->blocksize);
+ ASSERT(!blkinfo->forw);
+ ASSERT(!blkinfo->back);
+ memcpy(root_blk->bp->data, bp->data, state->blocksize);
xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
node = (xfs_da_intnode_t *)info;
count = INT_GET(node->hdr.count, ARCH_CONVERT);
- if (count > (XFS_DA_NODE_ENTRIES(state->mp) >> 1)) {
- *action = 0; /* blk over 50%, dont try to join */
- return(0); /* blk over 50%, dont try to join */
+ if (count > (state->node_ents >> 1)) {
+ *action = 0; /* blk over 50%, don't try to join */
+ return(0); /* blk over 50%, don't try to join */
}
/*
* Make altpath point to the block we want to keep and
* path point to the block we want to drop (this one).
*/
- forward = (!INT_ISZERO(info->forw, ARCH_CONVERT));
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ forward = info->forw;
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
ASSERT(bp != NULL);
node = (xfs_da_intnode_t *)info;
- count = XFS_DA_NODE_ENTRIES(state->mp);
- count -= XFS_DA_NODE_ENTRIES(state->mp) >> 2;
+ count = state->node_ents;
+ count -= state->node_ents >> 2;
count -= INT_GET(node->hdr.count, ARCH_CONVERT);
node = bp->data;
ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
return(0);
}
-
/*
* Walk back up the tree adjusting hash values as necessary,
* when we stop making changes, return.
level = path->active-1;
blk = &path->blk[ level ];
switch (blk->magic) {
-#ifdef __KERNEL__
case XFS_ATTR_LEAF_MAGIC:
lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
if (count == 0)
return;
break;
-#endif
case XFS_DIR_LEAF_MAGIC:
ASSERT(XFS_DIR_IS_V1(state->mp));
lasthash = xfs_dir_leaf_lasthash(blk->bp, &count);
if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash)
break;
blk->hashval = lasthash;
- INT_SET(btree->hashval, ARCH_CONVERT, lasthash);
+ INT_SET(btree->hashval, ARCH_CONVERT, lasthash);
xfs_da_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
}
}
-
-
/*
* Remove an entry from an intermediate node.
*/
if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) {
tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
- ovbcopy(btree + 1, btree, tmp);
+ memmove(btree, btree + 1, tmp);
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, tmp));
btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ];
}
- bzero((char *)btree, sizeof(xfs_da_node_entry_t));
+ memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
INT_MOD(node->hdr.count, ARCH_CONVERT, -1);
{
btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ];
tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
- ovbcopy(&save_node->btree[0], btree, tmp);
+ memmove(btree, &save_node->btree[0], tmp);
btree = &save_node->btree[0];
xfs_da_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, btree,
* Move all the B-tree elements from drop_blk to save_blk.
*/
tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
- bcopy(&drop_node->btree[0], btree, tmp);
+ memcpy(btree, &drop_node->btree[0], tmp);
INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT));
xfs_da_log_buf(tp, save_blk->bp,
save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
}
-
/*========================================================================
* Routines used for finding things in the Btree.
*========================================================================*/
xfs_da_args_t *args;
args = state->args;
+
/*
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
* Read the next node down in the tree.
*/
blk->blkno = blkno;
- error = xfs_da_read_buf(state->args->trans, state->args->dp,
- blkno, -1, &blk->bp,
- state->args->whichfork);
+ error = xfs_da_read_buf(args->trans, args->dp, blkno,
+ -1, &blk->bp, args->whichfork);
if (error) {
blk->blkno = 0;
state->path.active--;
return(error);
}
- ASSERT(blk->bp != NULL);
curr = blk->bp->data;
ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC ||
INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
*/
max = INT_GET(node->hdr.count, ARCH_CONVERT);
probe = span = max / 2;
- hashval = state->args->hashval;
+ hashval = args->hashval;
for (btree = &node->btree[probe]; span > 4;
btree = &node->btree[probe]) {
span /= 2;
blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT);
} else {
blk->index = probe;
- blkno = INT_GET(btree->before, ARCH_CONVERT);
+ blkno = INT_GET(btree->before, ARCH_CONVERT);
}
}
-#ifdef __KERNEL__
else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) {
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
-#endif
else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) {
blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL);
break;
for (;;) {
if (blk->magic == XFS_DIR_LEAF_MAGIC) {
ASSERT(XFS_DIR_IS_V1(state->mp));
- retval = xfs_dir_leaf_lookup_int(blk->bp, state->args,
+ retval = xfs_dir_leaf_lookup_int(blk->bp, args,
&blk->index);
} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
ASSERT(XFS_DIR_IS_V2(state->mp));
- retval = xfs_dir2_leafn_lookup_int(blk->bp, state->args,
+ retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
&blk->index, state);
}
-#ifdef __KERNEL__
else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
- retval = xfs_attr_leaf_lookup_int(blk->bp, state->args);
- blk->index = state->args->index;
- state->args->blkno = blk->blkno;
+ retval = xfs_attr_leaf_lookup_int(blk->bp, args);
+ blk->index = args->index;
+ args->blkno = blk->blkno;
}
-#endif
if (((retval == ENOENT) || (retval == ENOATTR)) &&
- (blk->hashval == state->args->hashval)) {
+ (blk->hashval == args->hashval)) {
error = xfs_da_path_shift(state, &state->path, 1, 1,
&retval);
if (error)
if (retval == 0) {
continue;
}
-#ifdef __KERNEL__
else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
/* path_shift() gives ENOENT */
retval = XFS_ERROR(ENOATTR);
}
-#endif
}
break;
}
*result = retval;
- return(0);
+ return(0);
}
-
/*========================================================================
* Utility routines.
*========================================================================*/
ASSERT(old_blk->magic == new_blk->magic);
switch (old_blk->magic) {
-#ifdef __KERNEL__
case XFS_ATTR_LEAF_MAGIC:
before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
break;
-#endif
case XFS_DIR_LEAF_MAGIC:
ASSERT(XFS_DIR_IS_V1(state->mp));
before = xfs_dir_leaf_order(old_blk->bp, new_blk->bp);
return(0);
}
-
/*
* Compare two intermediate nodes for "order".
*/
node2 = node2_bp->data;
ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) &&
(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC));
- if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
+ if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) <
INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) ||
(INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
return(0);
}
-
/*
* Pick up the last hashvalue from an intermediate node.
*/
ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
if (count)
*count = INT_GET(node->hdr.count, ARCH_CONVERT);
- if (INT_GET(node->hdr.count, ARCH_CONVERT) == 0)
+ if (!node->hdr.count)
return(0);
return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT));
}
ASSERT(level == path->active-1);
blk->index = 0;
switch(blk->magic) {
-#ifdef __KERNEL__
case XFS_ATTR_LEAF_MAGIC:
blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
NULL);
break;
-#endif
case XFS_DIR_LEAF_MAGIC:
ASSERT(XFS_DIR_IS_V1(state->mp));
blk->hashval = xfs_dir_leaf_lasthash(blk->bp,
* This is implemented with some source-level loop unrolling.
*/
xfs_dahash_t
-xfs_da_hashname(char *name, int namelen)
+xfs_da_hashname(const uchar_t *name, int namelen)
{
xfs_dahash_t hash;
-#define ROTL(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
-#ifdef SLOWVERSION
- /*
- * This is the old one-byte-at-a-time version.
- */
- for (hash = 0; namelen > 0; namelen--) {
- hash = *name++ ^ ROTL(hash, 7);
- }
- return(hash);
-#else
/*
* Do four characters at a time as long as we can.
*/
- for (hash = 0; namelen >= 4; namelen -= 4, name += 4) {
+ for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
- (name[3] << 0) ^ ROTL(hash, 7 * 4);
- }
+ (name[3] << 0) ^ rol32(hash, 7 * 4);
+
/*
* Now do the rest of the characters.
*/
switch (namelen) {
case 3:
return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
- ROTL(hash, 7 * 3);
+ rol32(hash, 7 * 3);
case 2:
- return (name[0] << 7) ^ (name[1] << 0) ^ ROTL(hash, 7 * 2);
+ return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
case 1:
- return (name[0] << 0) ^ ROTL(hash, 7 * 1);
- case 0:
+ return (name[0] << 0) ^ rol32(hash, 7 * 1);
+ default: /* case 0: */
return hash;
}
- /* NOTREACHED */
-#endif
-#undef ROTL
- return 0; /* keep gcc happy */
}
/*
return 0;
}
-
/*
* Ick. We need to always be able to remove a btree block, even
* if there's no space reservation because the filesystem is full.
error = xfs_bmap_last_offset(tp, ip, &lastoff, w);
if (error)
return error;
- if (lastoff == 0)
+ if (unlikely(lastoff == 0)) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
+ mp);
return XFS_ERROR(EFSCORRUPTED);
+ }
/*
* Read the last block in the btree space.
*/
/*
* Copy the last block into the dead buffer and log it.
*/
- bcopy(last_buf->data, dead_buf->data, mp->m_dirblksize);
+ memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
dead_info = dead_buf->data;
/*
if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
goto done;
sib_info = sib_buf->data;
- if (INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno ||
- INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT)) {
+ if (unlikely(
+ INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno ||
+ INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
goto done;
sib_info = sib_buf->data;
- if ( INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno
+ if (unlikely(
+ INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno
|| INT_GET(sib_info->magic, ARCH_CONVERT)
- != INT_GET(dead_info->magic, ARCH_CONVERT)) {
+ != INT_GET(dead_info->magic, ARCH_CONVERT))) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
goto done;
par_node = par_buf->data;
- if (INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC ||
- (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1)) {
+ if (unlikely(
+ INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC ||
+ (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash;
entno++)
continue;
- if (entno == INT_GET(par_node->hdr.count, ARCH_CONVERT)) {
+ if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT);
xfs_da_brelse(tp, par_buf);
par_buf = NULL;
- if (par_blkno == 0) {
+ if (unlikely(par_blkno == 0)) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
goto done;
par_node = par_buf->data;
- if (INT_GET(par_node->hdr.level, ARCH_CONVERT) != level ||
- INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) {
+ if (unlikely(
+ INT_GET(par_node->hdr.level, ARCH_CONVERT) != level ||
+ INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) {
+ XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
+ XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
int caller,
inst_t *ra)
{
- xfs_buf_t *bp = 0;
+ xfs_buf_t *bp = NULL;
xfs_buf_t **bplist;
int error=0;
int i;
nmap = 1;
}
} else {
- xfs_fsblock_t firstblock;
-
- firstblock = NULLFSBLOCK;
- mapp = kmem_alloc(sizeof(*mapp) * nfsb,
- trans ? KM_SLEEP : KM_SLEEP_IO);
+ mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
nmap = nfsb;
if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
nfsb,
XFS_BMAPI_METADATA |
XFS_BMAPI_AFLAG(whichfork),
- &firstblock, 0, mapp, &nmap, NULL)))
+ NULL, 0, mapp, &nmap, NULL)))
goto exit0;
}
} else {
}
if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
+ if (unlikely(error == EFSCORRUPTED)) {
+ if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ int i;
+ cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
+ (long long)bno);
+ cmn_err(CE_ALERT, "dir: inode %lld\n",
+ (long long)dp->i_ino);
+ for (i = 0; i < nmap; i++) {
+ cmn_err(CE_ALERT,
+ "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n",
+ i,
+ (long long)mapp[i].br_startoff,
+ (long long)mapp[i].br_startblock,
+ (long long)mapp[i].br_blockcount,
+ mapp[i].br_state);
+ }
+ }
+ XFS_ERROR_REPORT("xfs_da_do_buf(1)",
+ XFS_ERRLEVEL_LOW, mp);
+ }
goto exit0;
}
if (caller != 3 && nmap > 1) {
error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
break;
case 1:
-#ifndef __KERNEL__
case 2:
-#endif
bp = NULL;
error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
mappedbno, nmapped, 0, &bp);
break;
-#ifdef __KERNEL__
case 3:
xfs_baread(mp->m_ddev_targp, mappedbno, nmapped);
error = 0;
bp = NULL;
break;
-#endif
}
if (error) {
if (bp)
continue;
if (caller == 1) {
if (whichfork == XFS_ATTR_FORK) {
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
XFS_ATTR_BTREE_REF);
} else {
XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
xfs_dir2_data_t *data;
xfs_dir2_free_t *free;
xfs_da_blkinfo_t *info;
+ uint magic, magic1;
info = rbp->data;
data = rbp->data;
free = rbp->data;
- if (XFS_TEST_ERROR((INT_GET(info->magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) &&
- (INT_GET(info->magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) &&
- (INT_GET(info->magic, ARCH_CONVERT) != XFS_ATTR_LEAF_MAGIC) &&
- (INT_GET(info->magic, ARCH_CONVERT) != XFS_DIR2_LEAF1_MAGIC) &&
- (INT_GET(info->magic, ARCH_CONVERT) != XFS_DIR2_LEAFN_MAGIC) &&
- (INT_GET(data->hdr.magic, ARCH_CONVERT) != XFS_DIR2_BLOCK_MAGIC) &&
- (INT_GET(data->hdr.magic, ARCH_CONVERT) != XFS_DIR2_DATA_MAGIC) &&
+ magic = INT_GET(info->magic, ARCH_CONVERT);
+ magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT);
+ if (unlikely(
+ XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
+ (magic != XFS_DIR_LEAF_MAGIC) &&
+ (magic != XFS_ATTR_LEAF_MAGIC) &&
+ (magic != XFS_DIR2_LEAF1_MAGIC) &&
+ (magic != XFS_DIR2_LEAFN_MAGIC) &&
+ (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
+ (magic1 != XFS_DIR2_DATA_MAGIC) &&
(INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC),
mp, XFS_ERRTAG_DA_READ_BUF,
- XFS_RANDOM_DA_READ_BUF)) {
+ XFS_RANDOM_DA_READ_BUF))) {
xfs_buftrace("DA READ ERROR", rbp->bps[0]);
+ XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
+ XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
xfs_da_brelse(trans, rbp);
nbplist = 0;
if (state->extravalid && state->extrablk.bp)
xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
- bzero((char *)state, sizeof(*state));
+ memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
kmem_zone_free(xfs_da_state_zone, state);
}
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
{
- xfs_buf_t *bp;
+ xfs_buf_t *bp;
xfs_dabuf_t *dabuf;
int i;
int off;
dabuf->dirty = 0;
#ifdef XFS_DABUF_DEBUG
dabuf->ra = ra;
- dabuf->dev = XFS_BUF_TARGET(bps[0]);
+ dabuf->target = XFS_BUF_TARGET(bps[0]);
dabuf->blkno = XFS_BUF_ADDR(bps[0]);
#endif
if (nbuf == 1) {
dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
bp = bps[i];
- bcopy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
+ memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
XFS_BUF_COUNT(bp));
}
}
#ifdef XFS_DABUF_DEBUG
{
- int s;
+ SPLDECL(s);
xfs_dabuf_t *p;
s = mutex_spinlock(&xfs_dabuf_global_lock);
for (p = xfs_dabuf_global_list; p; p = p->next) {
ASSERT(p->blkno != dabuf->blkno ||
- p->dev != dabuf->dev);
+ p->target != dabuf->target);
}
dabuf->prev = NULL;
if (xfs_dabuf_global_list)
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
xfs_buf_t *bp;
- int i;
- int off;
+ int i;
+ int off;
if (dabuf->dirty) {
ASSERT(dabuf->nbuf > 1);
for (i = off = 0; i < dabuf->nbuf;
i++, off += XFS_BUF_COUNT(bp)) {
bp = dabuf->bps[i];
- bcopy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
+ memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
XFS_BUF_COUNT(bp));
}
}
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
- ASSERT(dabuf);
+ ASSERT(dabuf);
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if (dabuf->dirty)
xfs_da_buf_clean(dabuf);
kmem_free(dabuf->data, BBTOB(dabuf->bbcount));
#ifdef XFS_DABUF_DEBUG
{
- int s;
+ SPLDECL(s);
s = mutex_spinlock(&xfs_dabuf_global_lock);
if (dabuf->prev)
dabuf->next->prev = dabuf->prev;
mutex_spinunlock(&xfs_dabuf_global_lock, s);
}
- bzero(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf));
+ memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
#endif
if (dabuf->nbuf == 1)
kmem_zone_free(xfs_dabuf_zone, dabuf);
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
xfs_buf_t *bp;
- uint f;
- int i;
- uint l;
- int off;
+ uint f;
+ int i;
+ uint l;
+ int off;
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if (dabuf->nbuf == 1) {
l = last;
if (f <= l)
xfs_trans_log_buf(tp, bp, f - off, l - off);
- /*
+ /*
* B_DONE is set by xfs_trans_log buf.
* If we don't set it on a new buffer (get not read)
* then if we don't put anything in the buffer it won't
{
xfs_buf_t *bp;
xfs_buf_t **bplist;
- int i;
- int nbuf;
+ int i;
+ int nbuf;
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if ((nbuf = dabuf->nbuf) == 1) {
bp = dabuf->bps[0];
} else {
bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- bcopy(dabuf->bps, bplist, nbuf * sizeof(*bplist));
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
}
xfs_da_buf_done(dabuf);
for (i = 0; i < nbuf; i++)
{
xfs_buf_t *bp;
xfs_buf_t **bplist;
- int i;
- int nbuf;
+ int i;
+ int nbuf;
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if ((nbuf = dabuf->nbuf) == 1) {
bp = dabuf->bps[0];
} else {
bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- bcopy(dabuf->bps, bplist, nbuf * sizeof(*bplist));
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
}
xfs_da_buf_done(dabuf);
for (i = 0; i < nbuf; i++)
if (bplist != &bp)
kmem_free(bplist, nbuf * sizeof(*bplist));
}
+
+/*
+ * Get the first daddr from a dabuf.
+ *
+ * Returns the disk address (XFS_BUF_ADDR) of the first underlying
+ * xfs_buf backing the dabuf.  The caller must pass an initialized
+ * dabuf (nbuf and data set), which the ASSERTs below check.
+ */
+xfs_daddr_t
+xfs_da_blkno(xfs_dabuf_t *dabuf)
+{
+	ASSERT(dabuf->nbuf);
+	ASSERT(dabuf->data);
+	return XFS_BUF_ADDR(dabuf->bps[0]);
+}