xfs: use GFP_KERNEL in pure transaction contexts
author     Dave Chinner <dchinner@redhat.com>
           Mon, 15 Jan 2024 22:59:46 +0000 (09:59 +1100)
committer  Chandan Babu R <chandanbabu@kernel.org>
           Tue, 13 Feb 2024 12:37:35 +0000 (18:07 +0530)
When running in a transaction context, memory allocations are scoped
to GFP_NOFS. Hence we don't need to use GFP_NOFS for allocations made
purely in transaction context - GFP_KERNEL will automatically get
converted to GFP_NOFS as appropriate.

Go through the code and convert all the obvious GFP_NOFS allocations
in transaction context to use GFP_KERNEL. This further reduces the
explicit use of GFP_NOFS in XFS.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
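
For reference, the "scoped to GFP_NOFS" behaviour described above comes from
the memalloc_nofs_save()/memalloc_nofs_restore() helpers in
<linux/sched/mm.h>: xfs_trans_alloc() enters an NOFS allocation scope, and
while that scope is active the page allocator masks __GFP_FS out of any
GFP_KERNEL request, so reclaim cannot recurse into the filesystem. A minimal
illustrative sketch of the mechanism, not part of this patch (the function
name is hypothetical):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_alloc_in_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/* Enter a scoped NOFS section, as xfs_trans_alloc() does. */
	nofs_flag = memalloc_nofs_save();

	/*
	 * GFP_KERNEL here cannot recurse into the filesystem: while
	 * PF_MEMALLOC_NOFS is set, the allocator strips __GFP_FS from
	 * the request, making it behave as GFP_NOFS.
	 */
	p = kzalloc(size, GFP_KERNEL | __GFP_NOFAIL);

	memalloc_nofs_restore(nofs_flag);
	return p;
}
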
12 files changed:
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_rmap.c
fs/xfs/xfs_attr_item.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mru_cache.c

diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index e965a48e7db96f89b782038e0aa363d526c2a42e..82ab559f1da18212eddbd36ad838d808a86db391 100644
@@ -891,7 +891,8 @@ xfs_attr_defer_add(
 
        struct xfs_attr_intent  *new;
 
-       new = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);
+       new = kmem_cache_zalloc(xfs_attr_intent_cache,
+                       GFP_KERNEL | __GFP_NOFAIL);
        new->xattri_op_flags = op_flags;
        new->xattri_da_args = args;
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index f362345467facd57cc314547142e1093b4e54983..b525524a2da4ef1cf116418f33498d0740093ec1 100644
@@ -6098,7 +6098,7 @@ __xfs_bmap_add(
                        bmap->br_blockcount,
                        bmap->br_state);
 
-       bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
+       bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&bi->bi_list);
        bi->bi_type = type;
        bi->bi_owner = ip;
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 75689c151a54d81b3c00212714e0675a4f2f2519..8ae4401f6810b2bd64c1823132aa3d5e3a555388 100644
@@ -825,7 +825,7 @@ xfs_defer_alloc(
        struct xfs_defer_pending        *dfp;
 
        dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
-                       GFP_NOFS | __GFP_NOFAIL);
+                       GFP_KERNEL | __GFP_NOFAIL);
        dfp->dfp_ops = ops;
        INIT_LIST_HEAD(&dfp->dfp_work);
        list_add_tail(&dfp->dfp_list, &tp->t_dfops);
@@ -888,7 +888,7 @@ xfs_defer_start_recovery(
        struct xfs_defer_pending        *dfp;
 
        dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
-                       GFP_NOFS | __GFP_NOFAIL);
+                       GFP_KERNEL | __GFP_NOFAIL);
        dfp->dfp_ops = ops;
        dfp->dfp_intent = lip;
        INIT_LIST_HEAD(&dfp->dfp_work);
@@ -979,7 +979,7 @@ xfs_defer_ops_capture(
                return ERR_PTR(error);
 
        /* Create an object to capture the defer ops. */
-       dfc = kzalloc(sizeof(*dfc), GFP_NOFS | __GFP_NOFAIL);
+       dfc = kzalloc(sizeof(*dfc), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&dfc->dfc_list);
        INIT_LIST_HEAD(&dfc->dfc_dfops);
 
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 728f72f0d078f96aea9e35a0f7fe63757dcc1f2e..8c9403b33191b23036efac852c9832b847c2d53a 100644
@@ -236,7 +236,7 @@ xfs_dir_init(
        if (error)
                return error;
 
-       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
+       args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
@@ -273,7 +273,7 @@ xfs_dir_createname(
                XFS_STATS_INC(dp->i_mount, xs_dir_create);
        }
 
-       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
+       args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
@@ -435,7 +435,7 @@ xfs_dir_removename(
        ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
-       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
+       args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
@@ -496,7 +496,7 @@ xfs_dir_replace(
        if (rval)
                return rval;
 
-       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
+       args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 709fda3d742f63dac6f0ebf1429b3f09f8a33139..136d5d7b9de98c33a31ed7f8258ba536c0398368 100644
@@ -402,7 +402,7 @@ xfs_iroot_realloc(
                if (ifp->if_broot_bytes == 0) {
                        new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
                        ifp->if_broot = kmalloc(new_size,
-                                               GFP_NOFS | __GFP_NOFAIL);
+                                               GFP_KERNEL | __GFP_NOFAIL);
                        ifp->if_broot_bytes = (int)new_size;
                        return;
                }
@@ -417,7 +417,7 @@ xfs_iroot_realloc(
                new_max = cur_max + rec_diff;
                new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
                ifp->if_broot = krealloc(ifp->if_broot, new_size,
-                                        GFP_NOFS | __GFP_NOFAIL);
+                                        GFP_KERNEL | __GFP_NOFAIL);
                op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                                                     ifp->if_broot_bytes);
                np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -443,7 +443,7 @@ xfs_iroot_realloc(
        else
                new_size = 0;
        if (new_size > 0) {
-               new_broot = kmalloc(new_size, GFP_NOFS | __GFP_NOFAIL);
+               new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
                /*
                 * First copy over the btree block header.
                 */
@@ -512,7 +512,7 @@ xfs_idata_realloc(
 
        if (byte_diff) {
                ifp->if_data = krealloc(ifp->if_data, new_size,
-                                       GFP_NOFS | __GFP_NOFAIL);
+                                       GFP_KERNEL | __GFP_NOFAIL);
                if (new_size == 0)
                        ifp->if_data = NULL;
                ifp->if_bytes = new_size;
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 6709a7f8bad5a4f67bf674d64abb6d4118cec485..7df52daa22cf29ddf27d14abd0eced63064cc817 100644
@@ -1449,7 +1449,7 @@ __xfs_refcount_add(
                        blockcount);
 
        ri = kmem_cache_alloc(xfs_refcount_intent_cache,
-                       GFP_NOFS | __GFP_NOFAIL);
+                       GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ri->ri_list);
        ri->ri_type = type;
        ri->ri_startblock = startblock;
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 76bf7f48cb5acf19f00f04307b625afc017364cd..0bd1f47b2c2b26f618c42567f9de86fa7feb8429 100644
@@ -2559,7 +2559,7 @@ __xfs_rmap_add(
                        bmap->br_blockcount,
                        bmap->br_state);
 
-       ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
+       ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ri->ri_list);
        ri->ri_type = type;
        ri->ri_owner = owner;
diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
index 2a142cefdc3db86fadfe464779aff64300e939d2..0bf25a2ba3b678e1d232f1c98fd6276f1c2cf457 100644
@@ -226,7 +226,7 @@ xfs_attri_init(
 {
        struct xfs_attri_log_item       *attrip;
 
-       attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_NOFS | __GFP_NOFAIL);
+       attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_KERNEL | __GFP_NOFAIL);
 
        /*
         * Grab an extra reference to the name/value buffer for this log item.
@@ -666,7 +666,7 @@ xfs_attr_create_done(
 
        attrip = ATTRI_ITEM(intent);
 
-       attrdp = kmem_cache_zalloc(xfs_attrd_cache, GFP_NOFS | __GFP_NOFAIL);
+       attrdp = kmem_cache_zalloc(xfs_attrd_cache, GFP_KERNEL | __GFP_NOFAIL);
 
        xfs_log_item_init(tp->t_mountp, &attrdp->attrd_item, XFS_LI_ATTRD,
                          &xfs_attrd_item_ops);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index c2531c28905c09e938028e829107e626e05356f0..cb2a4b940292834272491cacaaf5a8c982f64f82 100644
@@ -66,7 +66,7 @@ xfs_zero_extent(
        return blkdev_issue_zeroout(target->bt_bdev,
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
-               GFP_NOFS, 0);
+               GFP_KERNEL, 0);
 }
 
 /*
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 10bbde3ce7cc08c350ab44c8ac89a6388d26a7ea..ab7546a9dfd7004ef2368218d5e5e28611b7e3de 100644
@@ -190,7 +190,7 @@ xfs_buf_get_maps(
        }
 
        bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
-                               GFP_NOFS | __GFP_NOFAIL);
+                       GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
        if (!bp->b_maps)
                return -ENOMEM;
        return 0;
@@ -222,7 +222,8 @@ _xfs_buf_alloc(
        int                     i;
 
        *bpp = NULL;
-       bp = kmem_cache_zalloc(xfs_buf_cache, GFP_NOFS | __GFP_NOFAIL);
+       bp = kmem_cache_zalloc(xfs_buf_cache,
+                       GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
 
        /*
         * We don't want certain flags to appear in b_flags unless they are
@@ -325,7 +326,7 @@ xfs_buf_alloc_kmem(
        struct xfs_buf  *bp,
        xfs_buf_flags_t flags)
 {
-       gfp_t           gfp_mask = GFP_NOFS | __GFP_NOFAIL;
+       gfp_t           gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
        size_t          size = BBTOB(bp->b_length);
 
        /* Assure zeroed buffer for non-read cases. */
@@ -356,13 +357,11 @@ xfs_buf_alloc_pages(
        struct xfs_buf  *bp,
        xfs_buf_flags_t flags)
 {
-       gfp_t           gfp_mask = __GFP_NOWARN;
+       gfp_t           gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
        long            filled = 0;
 
        if (flags & XBF_READ_AHEAD)
                gfp_mask |= __GFP_NORETRY;
-       else
-               gfp_mask |= GFP_NOFS;
 
        /* Make sure that we have a page list */
        bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
@@ -429,11 +428,18 @@ _xfs_buf_map_pages(
 
                /*
                 * vm_map_ram() will allocate auxiliary structures (e.g.
-                * pagetables) with GFP_KERNEL, yet we are likely to be under
-                * GFP_NOFS context here. Hence we need to tell memory reclaim
-                * that we are in such a context via PF_MEMALLOC_NOFS to prevent
-                * memory reclaim re-entering the filesystem here and
-                * potentially deadlocking.
+                * pagetables) with GFP_KERNEL, yet we are often under a scoped
+                * nofs context here. Mixing GFP_KERNEL with GFP_NOFS allocations
+                * from the same call site that can be run from both above and
+                * below memory reclaim causes lockdep false positives. Hence we
+                * always need to force this allocation to nofs context because
+                * we can't pass __GFP_NOLOCKDEP down to auxiliary structures to
+                * prevent false positive lockdep reports.
+                *
+                * XXX(dgc): I think dquot reclaim is the only place we can get
+                * to this function from memory reclaim context now. If we fix
+                * that like we've fixed inode reclaim to avoid writeback from
+                * reclaim, this nofs wrapping can go away.
                 */
                nofs_flag = memalloc_nofs_save();
                do {
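
A note on the two patterns used in the xfs_buf.c hunks above: where XFS
issues the allocation call itself, the gfp mask can carry __GFP_NOLOCKDEP
directly to suppress the false-positive lockdep reports; vm_map_ram(),
however, allocates its auxiliary structures internally with plain
GFP_KERNEL, so the only way to constrain those allocations is the scoped
memalloc_nofs_save()/memalloc_nofs_restore() pair. A minimal illustrative
sketch contrasting the two, not part of the patch (the function and its
locals are hypothetical):

#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_map_pages(struct page **pages, unsigned int count)
{
	unsigned int nofs_flag;
	void *cookie, *addr;

	/*
	 * Pattern 1: a direct allocation annotates the gfp mask at the
	 * call site (cf. xfs_buf_get_maps() above).
	 */
	cookie = kzalloc(count * sizeof(*pages),
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);

	/*
	 * Pattern 2: vm_map_ram() allocates page tables internally with
	 * GFP_KERNEL, so the caller scopes the whole call to NOFS because
	 * the gfp flags cannot be passed down.
	 */
	nofs_flag = memalloc_nofs_save();
	addr = vm_map_ram(pages, count, -1);	/* -1: any NUMA node */
	memalloc_nofs_restore(nofs_flag);

	kfree(cookie);
	return addr;
}
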
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index ee39639bb92b9a6ead5b2a1a0be4cd80c0c01292..1f68569e62ca090feaf54211ee6a3b0c513eac05 100644
@@ -3518,7 +3518,8 @@ xlog_ticket_alloc(
        struct xlog_ticket      *tic;
        int                     unit_res;
 
-       tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
+       tic = kmem_cache_zalloc(xfs_log_ticket_cache,
+                       GFP_KERNEL | __GFP_NOFAIL);
 
        unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
 
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index ce496704748de45d4ecaeea8a654129173ff77d8..7443debaffd65aeeba54438e3df6fa45cbbc8354 100644
@@ -428,7 +428,7 @@ xfs_mru_cache_insert(
        if (!mru || !mru->lists)
                return -EINVAL;
 
-       if (radix_tree_preload(GFP_NOFS))
+       if (radix_tree_preload(GFP_KERNEL))
                return -ENOMEM;
 
        INIT_LIST_HEAD(&elem->list_node);