]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/msm/a8xx: Implement IFPC support for A840
author: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Fri, 27 Mar 2026 00:14:03 +0000 (05:44 +0530)
committer: Rob Clark <robin.clark@oss.qualcomm.com>
Tue, 31 Mar 2026 20:47:30 +0000 (13:47 -0700)
Implement pwrup reglist support and add the necessary register
configurations to enable IFPC support on A840

Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/714679/
Message-ID: <20260327-a8xx-gpu-batch2-v2-14-2b53c38d2101@oss.qualcomm.com>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
drivers/gpu/drm/msm/adreno/a6xx_catalog.c
drivers/gpu/drm/msm/adreno/a8xx_gpu.c

index 758bc7bd31f6935a60c7795f39ac5d57f6ccab1e..53548f6e891b97eec3d9e28880fbd43c33c4d39e 100644 (file)
@@ -1891,6 +1891,185 @@ static const struct adreno_reglist a840_gbif[] = {
        { },
 };
 
+static const uint32_t a840_pwrup_reglist_regs[] = {
+       REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP,
+       REG_A7XX_SP_READ_SEL,
+       REG_A6XX_UCHE_MODE_CNTL,
+       REG_A8XX_UCHE_VARB_IDLE_TIMEOUT,
+       REG_A8XX_UCHE_GBIF_GX_CONFIG,
+       REG_A8XX_UCHE_CCHE_MODE_CNTL,
+       REG_A8XX_UCHE_CCHE_CACHE_WAYS,
+       REG_A8XX_UCHE_CACHE_WAYS,
+       REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN,
+       REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN + 1,
+       REG_A8XX_UCHE_CCHE_LPAC_GMEM_RANGE_MIN,
+       REG_A8XX_UCHE_CCHE_LPAC_GMEM_RANGE_MIN + 1,
+       REG_A8XX_UCHE_CCHE_TRAP_BASE,
+       REG_A8XX_UCHE_CCHE_TRAP_BASE + 1,
+       REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE,
+       REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE + 1,
+       REG_A8XX_UCHE_HW_DBG_CNTL,
+       REG_A8XX_UCHE_WRITE_THRU_BASE,
+       REG_A8XX_UCHE_WRITE_THRU_BASE + 1,
+       REG_A8XX_UCHE_TRAP_BASE,
+       REG_A8XX_UCHE_TRAP_BASE + 1,
+       REG_A8XX_UCHE_CLIENT_PF,
+       REG_A8XX_RB_CMP_NC_MODE_CNTL,
+       REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN,
+       REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN + 1,
+       REG_A6XX_TPL1_NC_MODE_CNTL,
+       REG_A6XX_TPL1_DBG_ECO_CNTL,
+       REG_A6XX_TPL1_DBG_ECO_CNTL1,
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18),
+       REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19),
+};
+DECLARE_ADRENO_REGLIST_LIST(a840_pwrup_reglist);
+
+static const u32 a840_ifpc_reglist_regs[] = {
+       REG_A8XX_RBBM_NC_MODE_CNTL,
+       REG_A8XX_RBBM_SLICE_NC_MODE_CNTL,
+       REG_A6XX_SP_NC_MODE_CNTL,
+       REG_A6XX_SP_CHICKEN_BITS,
+       REG_A8XX_SP_SS_CHICKEN_BITS_0,
+       REG_A7XX_SP_CHICKEN_BITS_1,
+       REG_A7XX_SP_CHICKEN_BITS_2,
+       REG_A7XX_SP_CHICKEN_BITS_3,
+       REG_A8XX_SP_CHICKEN_BITS_4,
+       REG_A6XX_SP_PERFCTR_SHADER_MASK,
+       REG_A8XX_RBBM_SLICE_PERFCTR_CNTL,
+       REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL,
+       REG_A7XX_SP_HLSQ_DBG_ECO_CNTL,
+       REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_1,
+       REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2,
+       REG_A8XX_SP_HLSQ_DBG_ECO_CNTL_3,
+       REG_A8XX_SP_HLSQ_LPAC_GMEM_RANGE_MIN,
+       REG_A8XX_SP_HLSQ_LPAC_GMEM_RANGE_MIN + 1,
+       REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL,
+       REG_A8XX_RBBM_PERFCTR_CNTL,
+       REG_A8XX_CP_PROTECT_GLOBAL(0),
+       REG_A8XX_CP_PROTECT_GLOBAL(1),
+       REG_A8XX_CP_PROTECT_GLOBAL(2),
+       REG_A8XX_CP_PROTECT_GLOBAL(3),
+       REG_A8XX_CP_PROTECT_GLOBAL(4),
+       REG_A8XX_CP_PROTECT_GLOBAL(5),
+       REG_A8XX_CP_PROTECT_GLOBAL(6),
+       REG_A8XX_CP_PROTECT_GLOBAL(7),
+       REG_A8XX_CP_PROTECT_GLOBAL(8),
+       REG_A8XX_CP_PROTECT_GLOBAL(9),
+       REG_A8XX_CP_PROTECT_GLOBAL(10),
+       REG_A8XX_CP_PROTECT_GLOBAL(11),
+       REG_A8XX_CP_PROTECT_GLOBAL(12),
+       REG_A8XX_CP_PROTECT_GLOBAL(13),
+       REG_A8XX_CP_PROTECT_GLOBAL(14),
+       REG_A8XX_CP_PROTECT_GLOBAL(15),
+       REG_A8XX_CP_PROTECT_GLOBAL(16),
+       REG_A8XX_CP_PROTECT_GLOBAL(17),
+       REG_A8XX_CP_PROTECT_GLOBAL(18),
+       REG_A8XX_CP_PROTECT_GLOBAL(19),
+       REG_A8XX_CP_PROTECT_GLOBAL(20),
+       REG_A8XX_CP_PROTECT_GLOBAL(21),
+       REG_A8XX_CP_PROTECT_GLOBAL(22),
+       REG_A8XX_CP_PROTECT_GLOBAL(23),
+       REG_A8XX_CP_PROTECT_GLOBAL(24),
+       REG_A8XX_CP_PROTECT_GLOBAL(25),
+       REG_A8XX_CP_PROTECT_GLOBAL(26),
+       REG_A8XX_CP_PROTECT_GLOBAL(27),
+       REG_A8XX_CP_PROTECT_GLOBAL(28),
+       REG_A8XX_CP_PROTECT_GLOBAL(29),
+       REG_A8XX_CP_PROTECT_GLOBAL(30),
+       REG_A8XX_CP_PROTECT_GLOBAL(31),
+       REG_A8XX_CP_PROTECT_GLOBAL(32),
+       REG_A8XX_CP_PROTECT_GLOBAL(33),
+       REG_A8XX_CP_PROTECT_GLOBAL(34),
+       REG_A8XX_CP_PROTECT_GLOBAL(35),
+       REG_A8XX_CP_PROTECT_GLOBAL(36),
+       REG_A8XX_CP_PROTECT_GLOBAL(37),
+       REG_A8XX_CP_PROTECT_GLOBAL(38),
+       REG_A8XX_CP_PROTECT_GLOBAL(39),
+       REG_A8XX_CP_PROTECT_GLOBAL(40),
+       REG_A8XX_CP_PROTECT_GLOBAL(41),
+       REG_A8XX_CP_PROTECT_GLOBAL(42),
+       REG_A8XX_CP_PROTECT_GLOBAL(43),
+       REG_A8XX_CP_PROTECT_GLOBAL(44),
+       REG_A8XX_CP_PROTECT_GLOBAL(45),
+       REG_A8XX_CP_PROTECT_GLOBAL(46),
+       REG_A8XX_CP_PROTECT_GLOBAL(47),
+       REG_A8XX_CP_PROTECT_GLOBAL(48),
+       REG_A8XX_CP_PROTECT_GLOBAL(49),
+       REG_A8XX_CP_PROTECT_GLOBAL(50),
+       REG_A8XX_CP_PROTECT_GLOBAL(51),
+       REG_A8XX_CP_PROTECT_GLOBAL(52),
+       REG_A8XX_CP_PROTECT_GLOBAL(53),
+       REG_A8XX_CP_PROTECT_GLOBAL(54),
+       REG_A8XX_CP_PROTECT_GLOBAL(55),
+       REG_A8XX_CP_PROTECT_GLOBAL(56),
+       REG_A8XX_CP_PROTECT_GLOBAL(57),
+       REG_A8XX_CP_PROTECT_GLOBAL(58),
+       REG_A8XX_CP_PROTECT_GLOBAL(59),
+       REG_A8XX_CP_PROTECT_GLOBAL(60),
+       REG_A8XX_CP_PROTECT_GLOBAL(61),
+       REG_A8XX_CP_PROTECT_GLOBAL(62),
+       REG_A8XX_CP_PROTECT_GLOBAL(63),
+};
+DECLARE_ADRENO_REGLIST_LIST(a840_ifpc_reglist);
+
+static const struct adreno_reglist_pipe a840_dyn_pwrup_reglist_regs[] = {
+       { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_GRAS_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_CHICKEN_BITS_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_CHICKEN_BITS_2, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_CHICKEN_BITS_3, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_CHICKEN_BITS_4, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_PC_VIS_STREAM_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A7XX_RB_CCU_CNTL, 0, BIT(PIPE_BR) },
+       { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0, BIT(PIPE_BR)},
+       { REG_A8XX_RB_CCU_NC_MODE_CNTL, 0, BIT(PIPE_BR) },
+       { REG_A8XX_RB_CMP_NC_MODE_CNTL, 0, BIT(PIPE_BR) },
+       { REG_A6XX_RB_RBP_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0, BIT(PIPE_BR) },
+       { REG_A6XX_RB_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0, BIT(PIPE_BR) },
+       { REG_A7XX_VFD_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_VFD_CB_BV_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_VFD_CB_BR_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_VFD_CB_LP_REQ_CNT, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+       { REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) |
+               BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) |
+               BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | BIT(PIPE_DDE_BV) },
+       { REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) |
+               BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) |
+               BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | BIT(PIPE_DDE_BV) },
+       { REG_A8XX_CP_PROTECT_CNTL_PIPE, 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC)},
+       { REG_A8XX_CP_PROTECT_PIPE(15), 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC) },
+       { REG_A8XX_RB_GC_GMEM_PROTECT, 0, BIT(PIPE_BR) },
+       { REG_A8XX_RB_LPAC_GMEM_PROTECT, 0, BIT(PIPE_BR) },
+       { REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0, BIT(PIPE_BR) },
+};
+DECLARE_ADRENO_REGLIST_PIPE_LIST(a840_dyn_pwrup_reglist);
+
 static const struct adreno_info a8xx_gpus[] = {
        {
                .chip_ids = ADRENO_CHIP_IDS(0x44070001),
@@ -1940,11 +2119,15 @@ static const struct adreno_info a8xx_gpus[] = {
                .gmem = 18 * SZ_1M,
                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
-                         ADRENO_QUIRK_HAS_HW_APRIV,
+                         ADRENO_QUIRK_HAS_HW_APRIV |
+                         ADRENO_QUIRK_IFPC,
                .funcs = &a8xx_gpu_funcs,
                .a6xx = &(const struct a6xx_info) {
                        .protect = &a840_protect,
                        .nonctxt_reglist = a840_nonctxt_regs,
+                       .pwrup_reglist = &a840_pwrup_reglist,
+                       .dyn_pwrup_reglist = &a840_dyn_pwrup_reglist,
+                       .ifpc_reglist = &a840_ifpc_reglist,
                        .gbif_cx = a840_gbif,
                        .max_slices = 3,
                        .gmu_chipid = 0x8020100,
index d5c547d347e248dbac941824b8142a36d662c979..d6782bdde067cdf2c95aa271098008b183aa1b89 100644 (file)
@@ -183,7 +183,7 @@ void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        /* Update HW if this is the current ring and we are not in preempt*/
        if (!a6xx_in_preempt(a6xx_gpu)) {
                if (a6xx_gpu->cur_ring == ring)
-                       gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+                       a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
                else
                        ring->restore_wptr = true;
        } else {
@@ -396,8 +396,87 @@ static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect)
        a8xx_aperture_clear(gpu);
 }
 
+static void a8xx_patch_pwrup_reglist(struct msm_gpu *gpu)
+{
+       const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       const struct adreno_reglist_list *reglist;
+       void *ptr = a6xx_gpu->pwrup_reglist_ptr;
+       struct cpu_gpu_lock *lock = ptr;
+       u32 *dest = (u32 *)&lock->regs[0];
+       u32 dyn_pwrup_reglist_count = 0;
+       int i;
+
+       lock->gpu_req = lock->cpu_req = lock->turn = 0;
+
+       reglist = adreno_gpu->info->a6xx->ifpc_reglist;
+       if (reglist) {
+               lock->ifpc_list_len = reglist->count;
+
+               /*
+                * For each entry in each of the lists, write the offset and the current
+                * register value into the GPU buffer
+                */
+               for (i = 0; i < reglist->count; i++) {
+                       *dest++ = reglist->regs[i];
+                       *dest++ = gpu_read(gpu, reglist->regs[i]);
+               }
+       }
+
+       reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+       if (reglist) {
+               lock->preemption_list_len = reglist->count;
+
+               for (i = 0; i < reglist->count; i++) {
+                       *dest++ = reglist->regs[i];
+                       *dest++ = gpu_read(gpu, reglist->regs[i]);
+               }
+       }
+
+       /*
+        * The overall register list is composed of
+        * 1. Static IFPC-only registers
+        * 2. Static IFPC + preemption registers
+        * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+        *
+        * The first two lists are static. The sizes of these lists are stored
+        * as the number of pairs in ifpc_list_len and preemption_list_len
+        * respectively. With concurrent binning, some of the perfcounter
+        * registers being virtualized, CP needs to know the pipe id to program
+        * the aperture in order to restore them. Thus, the third list is a
+        * dynamic list with triplets as
+        * (<aperture, shifted 12 bits> <address> <data>), and the length is
+        * stored as the number of triplets in dynamic_list_len.
+        */
+       dyn_pwrup_reglist = adreno_gpu->info->a6xx->dyn_pwrup_reglist;
+       if (!dyn_pwrup_reglist)
+               goto done;
+
+       for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+               for (i = 0; i < dyn_pwrup_reglist->count; i++) {
+                       if (!(dyn_pwrup_reglist->regs[i].pipe & BIT(pipe_id)))
+                               continue;
+                       *dest++ = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe_id);
+                       *dest++ = dyn_pwrup_reglist->regs[i].offset;
+                       *dest++ = a8xx_read_pipe_slice(gpu,
+                                                      pipe_id,
+                                                      a8xx_get_first_slice(a6xx_gpu),
+                                                      dyn_pwrup_reglist->regs[i].offset);
+                       dyn_pwrup_reglist_count++;
+               }
+       }
+
+       lock->dynamic_list_len = dyn_pwrup_reglist_count;
+
+done:
+       a8xx_aperture_clear(gpu);
+}
+
 static int a8xx_cp_init(struct msm_gpu *gpu)
 {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = gpu->rb[0];
        u32 mask;
 
@@ -405,7 +484,7 @@ static int a8xx_cp_init(struct msm_gpu *gpu)
        OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
        OUT_RING(ring, BIT(27));
 
-       OUT_PKT7(ring, CP_ME_INIT, 4);
+       OUT_PKT7(ring, CP_ME_INIT, 7);
 
        /* Use multiple HW contexts */
        mask = BIT(0);
@@ -419,6 +498,9 @@ static int a8xx_cp_init(struct msm_gpu *gpu)
        /* Disable save/restore of performance counters across preemption */
        mask |= BIT(6);
 
+       /* Enable the register init list with the spinlock */
+       mask |= BIT(8);
+
        OUT_RING(ring, mask);
 
        /* Enable multiple hardware contexts */
@@ -430,6 +512,14 @@ static int a8xx_cp_init(struct msm_gpu *gpu)
        /* Operation mode mask */
        OUT_RING(ring, 0x00000002);
 
+       /* Lo address */
+       OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova));
+       /* Hi address */
+       OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova));
+
+       /* Enable dyn pwrup list with triplets (offset, value, pipe) */
+       OUT_RING(ring, BIT(31));
+
        a6xx_flush(gpu, ring);
        return a8xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
@@ -712,6 +802,11 @@ static int hw_init(struct msm_gpu *gpu)
        WARN_ON(!gmem_protect);
        a8xx_aperture_clear(gpu);
 
+       if (!a6xx_gpu->pwrup_reglist_emitted) {
+               a8xx_patch_pwrup_reglist(gpu);
+               a6xx_gpu->pwrup_reglist_emitted = true;
+       }
+
        /* Enable hardware clockgating */
        a8xx_set_hwcg(gpu, true);
 out: