}
}
-static int
-_xfs_buf_alloc(
- struct xfs_buftarg *target,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags,
- struct xfs_buf **bpp)
-{
- struct xfs_buf *bp;
- int error;
- int i;
-
- *bpp = NULL;
- bp = kmem_cache_zalloc(xfs_buf_cache,
- GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
-
- /*
- * We don't want certain flags to appear in b_flags unless they are
- * specifically set by later operations on the buffer.
- */
- flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
-
- /*
- * A new buffer is held and locked by the owner. This ensures that the
- * buffer is owned by the caller and racing RCU lookups right after
- * inserting into the hash table are safe (and will have to wait for
- * the unlock to do anything non-trivial).
- */
- bp->b_hold = 1;
- sema_init(&bp->b_sema, 0); /* held, no waiters */
-
- spin_lock_init(&bp->b_lock);
- atomic_set(&bp->b_lru_ref, 1);
- init_completion(&bp->b_iowait);
- INIT_LIST_HEAD(&bp->b_lru);
- INIT_LIST_HEAD(&bp->b_list);
- INIT_LIST_HEAD(&bp->b_li_list);
- bp->b_target = target;
- bp->b_mount = target->bt_mount;
- bp->b_flags = flags;
-
- error = xfs_buf_get_maps(bp, nmaps);
- if (error) {
- kmem_cache_free(xfs_buf_cache, bp);
- return error;
- }
-
- bp->b_rhash_key = map[0].bm_bn;
- bp->b_length = 0;
- for (i = 0; i < nmaps; i++) {
- bp->b_maps[i].bm_bn = map[i].bm_bn;
- bp->b_maps[i].bm_len = map[i].bm_len;
- bp->b_length += map[i].bm_len;
- }
-
- atomic_set(&bp->b_pin_count, 0);
- init_waitqueue_head(&bp->b_waiters);
-
- XFS_STATS_INC(bp->b_mount, xb_create);
- trace_xfs_buf_init(bp, _RET_IP_);
-
- *bpp = bp;
- return 0;
-}
-
static void
xfs_buf_free_callback(
	struct callback_head *cb)
@@ ... @@
return 0;
}
+static int
+xfs_buf_alloc(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
+{
+ struct xfs_buf *bp;
+ int error;
+ int i;
+
+ *bpp = NULL;
+ bp = kmem_cache_zalloc(xfs_buf_cache,
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
+
+ /*
+ * We don't want certain flags to appear in b_flags unless they are
+ * specifically set by later operations on the buffer.
+ */
+ flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+
+ /*
+ * A new buffer is held and locked by the owner. This ensures that the
+ * buffer is owned by the caller and racing RCU lookups right after
+ * inserting into the hash table are safe (and will have to wait for
+ * the unlock to do anything non-trivial).
+ */
+ bp->b_hold = 1;
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+ spin_lock_init(&bp->b_lock);
+ atomic_set(&bp->b_lru_ref, 1);
+ init_completion(&bp->b_iowait);
+ INIT_LIST_HEAD(&bp->b_lru);
+ INIT_LIST_HEAD(&bp->b_list);
+ INIT_LIST_HEAD(&bp->b_li_list);
+ bp->b_target = target;
+ bp->b_mount = target->bt_mount;
+ bp->b_flags = flags;
+
+ error = xfs_buf_get_maps(bp, nmaps);
+ if (error) {
+ kmem_cache_free(xfs_buf_cache, bp);
+ return error;
+ }
+
+ bp->b_rhash_key = map[0].bm_bn;
+ bp->b_length = 0;
+ for (i = 0; i < nmaps; i++) {
+ bp->b_maps[i].bm_bn = map[i].bm_bn;
+ bp->b_maps[i].bm_len = map[i].bm_len;
+ bp->b_length += map[i].bm_len;
+ }
+
+ atomic_set(&bp->b_pin_count, 0);
+ init_waitqueue_head(&bp->b_waiters);
+
+ XFS_STATS_INC(bp->b_mount, xb_create);
+ trace_xfs_buf_init(bp, _RET_IP_);
+
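+	/*
+	 * Allocate the backing memory up front so that every caller gets
+	 * back a fully usable buffer and the error unwinding is shared:
+	 * xfs_buf_free() releases the maps and the buffer itself.
+	 */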
+ error = xfs_buf_alloc_backing_mem(bp, flags);
+ if (error) {
+ xfs_buf_free(bp);
+ return error;
+ }
+
+ *bpp = bp;
+ return 0;
+}
+
/*
* Finding and Reading Buffers
 */
@@ ... @@
struct xfs_buf *bp;
int error;
- error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
+ error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
if (error)
goto out_drop_pag;
- error = xfs_buf_alloc_backing_mem(new_bp, flags);
- if (error)
- goto out_free_buf;
-
/* The new buffer keeps the perag reference until it is freed. */
	new_bp->b_pag = pag;
@@ ... @@
struct xfs_buf **bpp)
{
int error;
- struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
- /* there are currently no valid flags for xfs_buf_get_uncached */
- ASSERT(flags == 0);
-
- *bpp = NULL;
-
- error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
- if (error)
- return error;
-
- error = xfs_buf_alloc_backing_mem(bp, flags);
- if (error)
- goto fail_free_buf;
-
- trace_xfs_buf_get_uncached(bp, _RET_IP_);
- *bpp = bp;
- return 0;
-
-fail_free_buf:
- xfs_buf_free(bp);
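+	/* xfs_buf_alloc() zeroes *bpp on entry and allocates the backing memory. */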
+ error = xfs_buf_alloc(target, &map, 1, flags, bpp);
+ if (!error)
+ trace_xfs_buf_get_uncached(*bpp, _RET_IP_);
return error;
}