drm/ttm: Add getter for some pool properties
author Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Mon, 20 Oct 2025 11:54:06 +0000 (12:54 +0100)
committer Tvrtko Ursulin <tursulin@ursulin.net>
Fri, 31 Oct 2025 08:54:54 +0000 (08:54 +0000)
No functional change, but it allows for easier refactoring in the future.
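
The idea is that callers no longer touch pool->use_dma_alloc and
pool->use_dma32 directly, so a later change to how these properties are
stored only has to touch the getters. As a purely hypothetical sketch
(assuming a pool->flags bitmask which does not exist in this patch), a
follow-up conversion could look like:

	#define TTM_POOL_USE_DMA_ALLOC	BIT(0)
	#define TTM_POOL_USE_DMA32	BIT(1)

	static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
	{
		/* Hypothetical: derive the property from a flags field */
		return pool->flags & TTM_POOL_USE_DMA_ALLOC;
	}

All the call sites converted below would then keep working unchanged.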

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-2-tvrtko.ursulin@igalia.com
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/ttm/ttm_pool_internal.h [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_tt.c

diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 8ade53371f72d3a7bd533203935a87566193a09e..17ebb9fbd6889c3dfcf83b4bea757d30857fddd4 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -8,6 +8,7 @@
 #include <drm/ttm/ttm_pool.h>
 
 #include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
 
 struct ttm_pool_test_case {
        const char *description;
@@ -155,7 +156,8 @@ static void ttm_pool_alloc_basic(struct kunit *test)
 
        KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
        KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
-       KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+       KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool),
+                       params->use_dma_alloc);
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index baf27c70a4193a121fbc8b4e67cd6feb4c612b85..ff6fab4122bb86e5a1bbc35015bb6801a30f45b7 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -48,6 +48,7 @@
 #include <drm/ttm/ttm_bo.h>
 
 #include "ttm_module.h"
+#include "ttm_pool_internal.h"
 
 #ifdef CONFIG_FAULT_INJECTION
 #include <linux/fault-inject.h>
@@ -148,7 +149,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
                        __GFP_THISNODE;
 
-       if (!pool->use_dma_alloc) {
+       if (!ttm_pool_uses_dma_alloc(pool)) {
                p = alloc_pages_node(pool->nid, gfp_flags, order);
                if (p)
                        p->private = order;
@@ -200,7 +201,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
                set_pages_wb(p, 1 << order);
 #endif
 
-       if (!pool || !pool->use_dma_alloc) {
+       if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
                __free_pages(p, order);
                return;
        }
@@ -243,7 +244,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
 {
        dma_addr_t addr;
 
-       if (pool->use_dma_alloc) {
+       if (ttm_pool_uses_dma_alloc(pool)) {
                struct ttm_pool_dma *dma = (void *)p->private;
 
                addr = dma->addr;
@@ -265,7 +266,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
                           unsigned int num_pages)
 {
        /* Unmapped while freeing the page */
-       if (pool->use_dma_alloc)
+       if (ttm_pool_uses_dma_alloc(pool))
                return;
 
        dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
@@ -339,7 +340,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
 {
-       if (pool->use_dma_alloc)
+       if (ttm_pool_uses_dma_alloc(pool))
                return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
@@ -348,7 +349,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                if (pool->nid != NUMA_NO_NODE)
                        return &pool->caching[caching].orders[order];
 
-               if (pool->use_dma32)
+               if (ttm_pool_uses_dma32(pool))
                        return &global_dma32_write_combined[order];
 
                return &global_write_combined[order];
@@ -356,7 +357,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                if (pool->nid != NUMA_NO_NODE)
                        return &pool->caching[caching].orders[order];
 
-               if (pool->use_dma32)
+               if (ttm_pool_uses_dma32(pool))
                        return &global_dma32_uncached[order];
 
                return &global_uncached[order];
@@ -396,7 +397,7 @@ static unsigned int ttm_pool_shrink(void)
 /* Return the allocation order for a page */
 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
 {
-       if (pool->use_dma_alloc) {
+       if (ttm_pool_uses_dma_alloc(pool)) {
                struct ttm_pool_dma *dma = (void *)p->private;
 
                return dma->vaddr & ~PAGE_MASK;
@@ -719,7 +720,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
        if (ctx->gfp_retry_mayfail)
                gfp_flags |= __GFP_RETRY_MAYFAIL;
 
-       if (pool->use_dma32)
+       if (ttm_pool_uses_dma32(pool))
                gfp_flags |= GFP_DMA32;
        else
                gfp_flags |= GFP_HIGHUSER;
@@ -977,7 +978,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
                return -EINVAL;
 
        if ((!ttm_backup_bytes_avail() && !flags->purge) ||
-           pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+           ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
                return -EBUSY;
 
 #ifdef CONFIG_X86
@@ -1014,7 +1015,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
        if (flags->purge)
                return shrunken;
 
-       if (pool->use_dma32)
+       if (ttm_pool_uses_dma32(pool))
                gfp = GFP_DMA32;
        else
                gfp = GFP_HIGHUSER;
@@ -1068,9 +1069,9 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 {
        unsigned int i, j;
 
-       WARN_ON(!dev && use_dma_alloc);
-
        pool->dev = dev;
        pool->nid = nid;
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;
+
+       WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));
@@ -1239,7 +1240,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 {
        unsigned int i;
 
-       if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
+       if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
                seq_puts(m, "unused\n");
                return 0;
        }
@@ -1250,7 +1251,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                if (!ttm_pool_select_type(pool, i, 0))
                        continue;
-               if (pool->use_dma_alloc)
+               if (ttm_pool_uses_dma_alloc(pool))
                        seq_puts(m, "DMA ");
                else
                        seq_printf(m, "N%d ", pool->nid);
diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
new file mode 100644
index 0000000..3e50d30
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_POOL_INTERNAL_H_
+#define _TTM_POOL_INTERNAL_H_
+
+#include <drm/ttm/ttm_pool.h>
+
+static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
+{
+       return pool->use_dma_alloc;
+}
+
+static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
+{
+       return pool->use_dma32;
+}
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 705c7d2f579817c5c946faba4aecf82891eae3b6..611d20ab966d2b2436b931f5fade221d5ff5b3ba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -47,6 +47,7 @@
 #include <drm/ttm/ttm_tt.h>
 
 #include "ttm_module.h"
+#include "ttm_pool_internal.h"
 
 static unsigned long ttm_pages_limit;
 
@@ -94,7 +95,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
         * mapped TT pages need to be decrypted or otherwise the drivers
         * will end up sending encrypted mem to the gpu.
         */
-       if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+       if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+           cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
                page_flags |= TTM_TT_FLAG_DECRYPTED;
                drm_info_once(ddev, "TT memory decryption enabled.");
        }
@@ -379,7 +381,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
 
        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
-               if (bdev->pool.use_dma32)
+               if (ttm_pool_uses_dma32(&bdev->pool))
                        atomic_long_add(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }
@@ -417,7 +419,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
 error:
        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
-               if (bdev->pool.use_dma32)
+               if (ttm_pool_uses_dma32(&bdev->pool))
                        atomic_long_sub(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }
@@ -440,7 +442,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 
        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
-               if (bdev->pool.use_dma32)
+               if (ttm_pool_uses_dma32(&bdev->pool))
                        atomic_long_sub(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }
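
For reference, a minimal sketch of a driver-side call site using the new
getters (a hypothetical example, not part of the patch; it mirrors the
GFP selection in __ttm_pool_alloc() above):

	static gfp_t example_pool_gfp(struct ttm_pool *pool)
	{
		/* Pools restricted to 32-bit DMA must stay in the DMA32 zone */
		return ttm_pool_uses_dma32(pool) ? GFP_DMA32 : GFP_HIGHUSER;
	}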