git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/ttm: Replace multiple booleans with flags in pool init
author: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Mon, 20 Oct 2025 11:54:07 +0000 (12:54 +0100)
committer: Tvrtko Ursulin <tursulin@ursulin.net>
Fri, 31 Oct 2025 09:01:08 +0000 (09:01 +0000)
Multiple consecutive boolean function arguments are usually not very
readable.

Replace the ones in ttm_pool_init() with flags with the additional
benefit of soon being able to pass in more data with just this one
code base churning cost.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-3-tvrtko.ursulin@igalia.com
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/ttm/tests/ttm_device_test.c
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/ttm/ttm_pool_internal.h
include/drm/ttm/ttm_allocation.h [new file with mode: 0644]
include/drm/ttm/ttm_pool.h

index aa9ee5dffa45514b0133e25a521b9a8b66e5ec3d..8f6d331e1ea2b84fd29b0fdbf9b6e2f408d334a8 100644 (file)
@@ -1837,7 +1837,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
                ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
                              adev->gmc.mem_partitions[i].numa.node,
-                             false, false);
+                             0);
        }
        return 0;
 }
index 1621903818e532830cda7556c828e484f7cccf7a..98648d5f20e78c8bc8161de3ad20ac5910ec8409 100644 (file)
@@ -7,11 +7,11 @@
 #include <drm/ttm/ttm_placement.h>
 
 #include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
 
 struct ttm_device_test_case {
        const char *description;
-       bool use_dma_alloc;
-       bool use_dma32;
+       unsigned int alloc_flags;
        bool pools_init_expected;
 };
 
@@ -119,26 +119,22 @@ static void ttm_device_init_no_vma_man(struct kunit *test)
 static const struct ttm_device_test_case ttm_device_cases[] = {
        {
                .description = "No DMA allocations, no DMA32 required",
-               .use_dma_alloc = false,
-               .use_dma32 = false,
                .pools_init_expected = false,
        },
        {
                .description = "DMA allocations, DMA32 required",
-               .use_dma_alloc = true,
-               .use_dma32 = true,
+               .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
+                              TTM_ALLOCATION_POOL_USE_DMA32,
                .pools_init_expected = true,
        },
        {
                .description = "No DMA allocations, DMA32 required",
-               .use_dma_alloc = false,
-               .use_dma32 = true,
+               .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32,
                .pools_init_expected = false,
        },
        {
                .description = "DMA allocations, no DMA32 required",
-               .use_dma_alloc = true,
-               .use_dma32 = false,
+               .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
                .pools_init_expected = true,
        },
 };
@@ -163,15 +159,14 @@ static void ttm_device_init_pools(struct kunit *test)
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
 
        err = ttm_device_kunit_init(priv, ttm_dev,
-                                   params->use_dma_alloc,
-                                   params->use_dma32);
+                                   params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+                                   params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32);
        KUNIT_ASSERT_EQ(test, err, 0);
 
        pool = &ttm_dev->pool;
        KUNIT_ASSERT_NOT_NULL(test, pool);
        KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
-       KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
-       KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+       KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags);
 
        if (params->pools_init_expected) {
                for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
@@ -181,7 +176,7 @@ static void ttm_device_init_pools(struct kunit *test)
                                KUNIT_EXPECT_EQ(test, pt.caching, i);
                                KUNIT_EXPECT_EQ(test, pt.order, j);
 
-                               if (params->use_dma_alloc)
+                               if (ttm_pool_uses_dma_alloc(pool))
                                        KUNIT_ASSERT_FALSE(test,
                                                           list_empty(&pt.pages));
                        }
index 17ebb9fbd6889c3dfcf83b4bea757d30857fddd4..11c92bd75779751391bebc374adf52db33537366 100644 (file)
@@ -13,7 +13,7 @@
 struct ttm_pool_test_case {
        const char *description;
        unsigned int order;
-       bool use_dma_alloc;
+       unsigned int alloc_flags;
 };
 
 struct ttm_pool_test_priv {
@@ -87,7 +87,7 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
 
-       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -114,12 +114,12 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
        {
                .description = "One page, with coherent DMA mappings enabled",
                .order = 0,
-               .use_dma_alloc = true,
+               .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
        },
        {
                .description = "Above the allocation limit, with coherent DMA mappings enabled",
                .order = MAX_PAGE_ORDER + 1,
-               .use_dma_alloc = true,
+               .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
        },
 };
 
@@ -151,13 +151,11 @@ static void ttm_pool_alloc_basic(struct kunit *test)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
 
-       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
-                     false);
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
 
        KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
        KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
-       KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool),
-                       params->use_dma_alloc);
+       KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -167,14 +165,14 @@ static void ttm_pool_alloc_basic(struct kunit *test)
        last_page = tt->pages[tt->num_pages - 1];
 
        if (params->order <= MAX_PAGE_ORDER) {
-               if (params->use_dma_alloc) {
+               if (ttm_pool_uses_dma_alloc(pool)) {
                        KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
                        KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
                } else {
                        KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
                }
        } else {
-               if (params->use_dma_alloc) {
+               if (ttm_pool_uses_dma_alloc(pool)) {
                        KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
                        KUNIT_ASSERT_NULL(test, (void *)last_page->private);
                } else {
@@ -220,7 +218,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
 
-       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -350,7 +348,7 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
 
-       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
        ttm_pool_alloc(pool, tt, &simple_ctx);
 
        pt = &pool->caching[caching].orders[order];
@@ -381,7 +379,7 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
 
-       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
        ttm_pool_alloc(pool, tt, &simple_ctx);
 
        pt = &pool->caching[caching].orders[order];
index c3e2fcbdd2cc6fa2935178e15186f41118b50f9d..a97b1444536cbadbd4d86386c0a6905248ccf0ef 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/export.h>
 #include <linux/mm.h>
 
+#include <drm/ttm/ttm_allocation.h>
 #include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_tt.h>
@@ -236,7 +237,9 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
        else
                nid = NUMA_NO_NODE;
 
-       ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
+       ttm_pool_init(&bdev->pool, dev, nid,
+                     (use_dma_alloc ? TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+                     (use_dma32 ? TTM_ALLOCATION_POOL_USE_DMA32 : 0));
 
        bdev->vma_manager = vma_manager;
        spin_lock_init(&bdev->lru_lock);
index ff6fab4122bb86e5a1bbc35015bb6801a30f45b7..4fc69447060cdc28026327b966585d472ab9cf7e 100644 (file)
@@ -1059,13 +1059,12 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
  * @pool: the pool to initialize
  * @dev: device for DMA allocations and mappings
  * @nid: NUMA node to use for allocations
- * @use_dma_alloc: true if coherent DMA alloc should be used
- * @use_dma32: true if GFP_DMA32 should be used
+ * @alloc_flags: TTM_ALLOCATION_POOL_ flags
  *
  * Initialize the pool and its pool types.
  */
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-                  int nid, bool use_dma_alloc, bool use_dma32)
+                  int nid, unsigned int alloc_flags)
 {
        unsigned int i, j;
 
@@ -1073,8 +1072,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
        pool->dev = dev;
        pool->nid = nid;
-       pool->use_dma_alloc = use_dma_alloc;
-       pool->use_dma32 = use_dma32;
+       pool->alloc_flags = alloc_flags;
 
        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                for (j = 0; j < NR_PAGE_ORDERS; ++j) {
index 3e50d30bd95a3473655026cd62dfc8203041ea4e..96b7f21514fb483044ef719c38f915eb5f1969d4 100644 (file)
@@ -4,16 +4,17 @@
 #ifndef _TTM_POOL_INTERNAL_H_
 #define _TTM_POOL_INTERNAL_H_
 
+#include <drm/ttm/ttm_allocation.h>
 #include <drm/ttm/ttm_pool.h>
 
 static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
 {
-       return pool->use_dma_alloc;
+       return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
 }
 
 static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
 {
-       return pool->use_dma32;
+       return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
 }
 
 #endif
diff --git a/include/drm/ttm/ttm_allocation.h b/include/drm/ttm/ttm_allocation.h
new file mode 100644 (file)
index 0000000..7869dc3
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_ALLOCATION_H_
+#define _TTM_ALLOCATION_H_
+
+#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC      BIT(0) /* Use coherent DMA allocations. */
+#define TTM_ALLOCATION_POOL_USE_DMA32          BIT(1) /* Use GFP_DMA32 allocations. */
+
+#endif
index 54cd34a6e4c0ac5e17844b50fd08e72143b460c1..67c72de913bb9d53e399726cacf4893445535dff 100644 (file)
@@ -64,16 +64,14 @@ struct ttm_pool_type {
  *
  * @dev: the device we allocate pages for
  * @nid: which numa node to use
- * @use_dma_alloc: if coherent DMA allocations should be used
- * @use_dma32: if GFP_DMA32 should be used
+ * @alloc_flags: TTM_ALLOCATION_POOL_ flags
  * @caching: pools for each caching/order
  */
 struct ttm_pool {
        struct device *dev;
        int nid;
 
-       bool use_dma_alloc;
-       bool use_dma32;
+       unsigned int alloc_flags;
 
        struct {
                struct ttm_pool_type orders[NR_PAGE_ORDERS];
@@ -85,7 +83,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
 
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-                  int nid, bool use_dma_alloc, bool use_dma32);
+                  int nid, unsigned int alloc_flags);
 void ttm_pool_fini(struct ttm_pool *pool);
 
 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);