1 From 97988373018e7fa7ff33b7774f88d30e48f71509 Mon Sep 17 00:00:00 2001
2 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= <mcanal@igalia.com>
3 Date: Tue, 25 Feb 2025 20:44:59 -0300
4 Subject: [PATCH] drm/v3d: Associate a V3D tech revision to all supported
5 devices
6 MIME-Version: 1.0
7 Content-Type: text/plain; charset=UTF-8
8 Content-Transfer-Encoding: 8bit
9
10 The V3D driver currently determines the GPU tech version (33, 41...)
11 by reading a register. This approach has worked so far since this
12 information wasn’t needed before powering on the GPU.
13
14 V3D 7.1 introduces new registers that must be written to power on the
15 GPU, requiring us to know the V3D version beforehand. To address this,
16 associate each supported SoC with the corresponding VideoCore GPU version
17 as part of the device data.
18
19 To prevent possible mistakes, add an assertion to verify that the version
20 specified in the device data matches the one reported by the hardware.
21 If there is a mismatch, the kernel will trigger a warning.
22
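   In essence, the mechanism looks like this (a simplified sketch of the
   changes in the diff below, not a complete listing):

       /* Each supported SoC advertises its V3D generation as OF match data. */
       enum v3d_gen {
               V3D_GEN_33 = 33,
               V3D_GEN_41 = 41,
               V3D_GEN_42 = 42,
               V3D_GEN_71 = 71,
       };

       static const struct of_device_id v3d_of_match[] = {
               { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
               { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
               { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
               { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
               {},
       };

       /* At probe time the generation is known before the GPU is powered on... */
       gen = (enum v3d_gen)of_device_get_match_data(dev);
       v3d->ver = gen;

       /* ...and later cross-checked against the value read from V3D_HUB_IDENT1. */
       WARN_ON(v3d->ver != gen);
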
23 Signed-off-by: Maíra Canal <mcanal@igalia.com>
24 ---
25 drivers/gpu/drm/v3d/v3d_debugfs.c | 128 +++++++++++++++---------------
26 drivers/gpu/drm/v3d/v3d_drv.c | 22 +++--
27 drivers/gpu/drm/v3d/v3d_drv.h | 11 ++-
28 drivers/gpu/drm/v3d/v3d_gem.c | 12 +--
29 drivers/gpu/drm/v3d/v3d_irq.c | 10 +--
30 drivers/gpu/drm/v3d/v3d_sched.c | 12 +--
31 6 files changed, 106 insertions(+), 89 deletions(-)
32
33 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c
34 +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
35 @@ -22,74 +22,74 @@ struct v3d_reg_def {
36 };
37
38 static const struct v3d_reg_def v3d_hub_reg_defs[] = {
39 - REGDEF(33, 42, V3D_HUB_AXICFG),
40 - REGDEF(33, 71, V3D_HUB_UIFCFG),
41 - REGDEF(33, 71, V3D_HUB_IDENT0),
42 - REGDEF(33, 71, V3D_HUB_IDENT1),
43 - REGDEF(33, 71, V3D_HUB_IDENT2),
44 - REGDEF(33, 71, V3D_HUB_IDENT3),
45 - REGDEF(33, 71, V3D_HUB_INT_STS),
46 - REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
47 -
48 - REGDEF(33, 71, V3D_MMU_CTL),
49 - REGDEF(33, 71, V3D_MMU_VIO_ADDR),
50 - REGDEF(33, 71, V3D_MMU_VIO_ID),
51 - REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
52 -
53 - REGDEF(71, 71, V3D_V7_GMP_STATUS),
54 - REGDEF(71, 71, V3D_V7_GMP_CFG),
55 - REGDEF(71, 71, V3D_V7_GMP_VIO_ADDR),
56 + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
57 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
58 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
59 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
60 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
61 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
62 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
63 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
64 +
65 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
66 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
67 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
68 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
69 +
70 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_GMP_STATUS),
71 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_GMP_CFG),
72 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_GMP_VIO_ADDR),
73 };
74
75 static const struct v3d_reg_def v3d_gca_reg_defs[] = {
76 - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
77 - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
78 + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
79 + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
80 };
81
82 static const struct v3d_reg_def v3d_core_reg_defs[] = {
83 - REGDEF(33, 71, V3D_CTL_IDENT0),
84 - REGDEF(33, 71, V3D_CTL_IDENT1),
85 - REGDEF(33, 71, V3D_CTL_IDENT2),
86 - REGDEF(33, 71, V3D_CTL_MISCCFG),
87 - REGDEF(33, 71, V3D_CTL_INT_STS),
88 - REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
89 - REGDEF(33, 71, V3D_CLE_CT0CS),
90 - REGDEF(33, 71, V3D_CLE_CT0CA),
91 - REGDEF(33, 71, V3D_CLE_CT0EA),
92 - REGDEF(33, 71, V3D_CLE_CT1CS),
93 - REGDEF(33, 71, V3D_CLE_CT1CA),
94 - REGDEF(33, 71, V3D_CLE_CT1EA),
95 -
96 - REGDEF(33, 71, V3D_PTB_BPCA),
97 - REGDEF(33, 71, V3D_PTB_BPCS),
98 -
99 - REGDEF(33, 41, V3D_GMP_STATUS),
100 - REGDEF(33, 41, V3D_GMP_CFG),
101 - REGDEF(33, 41, V3D_GMP_VIO_ADDR),
102 -
103 - REGDEF(33, 71, V3D_ERR_FDBGO),
104 - REGDEF(33, 71, V3D_ERR_FDBGB),
105 - REGDEF(33, 71, V3D_ERR_FDBGS),
106 - REGDEF(33, 71, V3D_ERR_STAT),
107 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
108 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
109 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
110 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
111 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
112 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
113 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
114 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
115 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
116 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
117 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
118 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
119 +
120 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
121 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
122 +
123 + REGDEF(V3D_GEN_33, V3D_GEN_41, V3D_GMP_STATUS),
124 + REGDEF(V3D_GEN_33, V3D_GEN_41, V3D_GMP_CFG),
125 + REGDEF(V3D_GEN_33, V3D_GEN_41, V3D_GMP_VIO_ADDR),
126 +
127 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
128 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
129 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
130 + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
131 };
132
133 static const struct v3d_reg_def v3d_csd_reg_defs[] = {
134 - REGDEF(41, 71, V3D_CSD_STATUS),
135 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG0),
136 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG1),
137 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG2),
138 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG3),
139 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG4),
140 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG5),
141 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG6),
142 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG0),
143 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG1),
144 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG2),
145 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG3),
146 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG4),
147 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG5),
148 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG6),
149 - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
150 + REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
151 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG0),
152 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG1),
153 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG2),
154 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG3),
155 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG4),
156 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG5),
157 + REGDEF(V3D_GEN_41, V3D_GEN_41, V3D_CSD_CURRENT_CFG6),
158 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG0),
159 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG1),
160 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG2),
161 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG3),
162 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG4),
163 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG5),
164 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG6),
165 + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
166 };
167
168 static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
169 @@ -165,7 +165,7 @@ static int v3d_v3d_debugfs_ident(struct
170 str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
171 seq_printf(m, "TFU: %s\n",
172 str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
173 - if (v3d->ver <= 42) {
174 + if (v3d->ver <= V3D_GEN_42) {
175 seq_printf(m, "TSY: %s\n",
176 str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
177 }
178 @@ -197,11 +197,11 @@ static int v3d_v3d_debugfs_ident(struct
179 seq_printf(m, " QPUs: %d\n", nslc * qups);
180 seq_printf(m, " Semaphores: %d\n",
181 V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
182 - if (v3d->ver <= 42) {
183 + if (v3d->ver <= V3D_GEN_42) {
184 seq_printf(m, " BCG int: %d\n",
185 (ident2 & V3D_IDENT2_BCG_INT) != 0);
186 }
187 - if (v3d->ver < 40) {
188 + if (v3d->ver < V3D_GEN_41) {
189 seq_printf(m, " Override TMU: %d\n",
190 (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
191 }
192 @@ -311,8 +311,8 @@ static int v3d_measure_clock(struct seq_
193 int core = 0;
194 int measure_ms = 1000;
195
196 - if (v3d->ver >= 40) {
197 - int cycle_count_reg = v3d->ver < 71 ?
198 + if (v3d->ver >= V3D_GEN_41) {
199 + int cycle_count_reg = v3d->ver < V3D_GEN_71 ?
200 V3D_PCTR_CYCLE_COUNT : V3D_V7_PCTR_CYCLE_COUNT;
201 V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
202 V3D_SET_FIELD(cycle_count_reg,
203 --- a/drivers/gpu/drm/v3d/v3d_drv.c
204 +++ b/drivers/gpu/drm/v3d/v3d_drv.c
205 @@ -17,6 +17,7 @@
206 #include <linux/dma-mapping.h>
207 #include <linux/io.h>
208 #include <linux/module.h>
209 +#include <linux/of.h>
210 #include <linux/of_platform.h>
211 #include <linux/platform_device.h>
212 #include <linux/reset.h>
213 @@ -88,7 +89,7 @@ static int v3d_get_param_ioctl(struct dr
214 args->value = 1;
215 return 0;
216 case DRM_V3D_PARAM_SUPPORTS_PERFMON:
217 - args->value = (v3d->ver >= 40);
218 + args->value = (v3d->ver >= V3D_GEN_41);
219 return 0;
220 case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
221 args->value = 1;
222 @@ -189,10 +190,10 @@ static const struct drm_driver v3d_drm_d
223 };
224
225 static const struct of_device_id v3d_of_match[] = {
226 - { .compatible = "brcm,2712-v3d" },
227 - { .compatible = "brcm,2711-v3d" },
228 - { .compatible = "brcm,7268-v3d" },
229 - { .compatible = "brcm,7278-v3d" },
230 + { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
231 + { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
232 + { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
233 + { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
234 {},
235 };
236 MODULE_DEVICE_TABLE(of, v3d_of_match);
237 @@ -211,6 +212,7 @@ static int v3d_platform_drm_probe(struct
238 struct device_node *node;
239 struct drm_device *drm;
240 struct v3d_dev *v3d;
241 + enum v3d_gen gen;
242 int ret;
243 u32 mmu_debug;
244 u32 ident1;
245 @@ -224,6 +226,9 @@ static int v3d_platform_drm_probe(struct
246
247 platform_set_drvdata(pdev, drm);
248
249 + gen = (enum v3d_gen)of_device_get_match_data(dev);
250 + v3d->ver = gen;
251 +
252 ret = map_regs(v3d, &v3d->hub_regs, "hub");
253 if (ret)
254 return ret;
255 @@ -253,6 +258,11 @@ static int v3d_platform_drm_probe(struct
256 ident1 = V3D_READ(V3D_HUB_IDENT1);
257 v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
258 V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
259 + /* Make sure that the V3D tech version retrieved from the HW is equal
260 + * to the one advertised by the device tree.
261 + */
262 + WARN_ON(v3d->ver != gen);
263 +
264 v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
265 WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
266
267 @@ -297,7 +307,7 @@ static int v3d_platform_drm_probe(struct
268 v3d->clk_down_rate =
269 (clk_get_rate(clk_get_parent(v3d->clk)) / (1 << 4)) + 10000;
270
271 - if (v3d->ver < 41) {
272 + if (v3d->ver < V3D_GEN_41) {
273 ret = map_regs(v3d, &v3d->gca_regs, "gca");
274 if (ret)
275 goto clk_disable;
276 --- a/drivers/gpu/drm/v3d/v3d_drv.h
277 +++ b/drivers/gpu/drm/v3d/v3d_drv.h
278 @@ -115,13 +115,20 @@ struct v3d_perfmon {
279 u64 values[];
280 };
281
282 +enum v3d_gen {
283 + V3D_GEN_33 = 33,
284 + V3D_GEN_41 = 41,
285 + V3D_GEN_42 = 42,
286 + V3D_GEN_71 = 71,
287 +};
288 +
289 struct v3d_dev {
290 struct drm_device drm;
291
292 /* Short representation (e.g. 33, 41) of the V3D tech version
293 * and revision.
294 */
295 - int ver;
296 + enum v3d_gen ver;
297 bool single_irq_line;
298
299 void __iomem *hub_regs;
300 @@ -213,7 +220,7 @@ to_v3d_dev(struct drm_device *dev)
301 static inline bool
302 v3d_has_csd(struct v3d_dev *v3d)
303 {
304 - return v3d->ver >= 41;
305 + return v3d->ver >= V3D_GEN_41;
306 }
307
308 #define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
309 --- a/drivers/gpu/drm/v3d/v3d_gem.c
310 +++ b/drivers/gpu/drm/v3d/v3d_gem.c
311 @@ -69,7 +69,7 @@ v3d_init_core(struct v3d_dev *v3d, int c
312 * type. If you want the default behavior, you can still put
313 * "2" in the indirect texture state's output_type field.
314 */
315 - if (v3d->ver < 40)
316 + if (v3d->ver < V3D_GEN_41)
317 V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
318
319 /* Whenever we flush the L2T cache, we always want to flush
320 @@ -89,7 +89,7 @@ v3d_init_hw_state(struct v3d_dev *v3d)
321 static void
322 v3d_idle_axi(struct v3d_dev *v3d, int core)
323 {
324 - if (v3d->ver >= 71)
325 + if (v3d->ver >= V3D_GEN_71)
326 return;
327
328 V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
329 @@ -105,7 +105,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int co
330 static void
331 v3d_idle_gca(struct v3d_dev *v3d)
332 {
333 - if (v3d->ver >= 41)
334 + if (v3d->ver >= V3D_GEN_41)
335 return;
336
337 V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
338 @@ -179,13 +179,13 @@ v3d_reset(struct v3d_dev *v3d)
339 static void
340 v3d_flush_l3(struct v3d_dev *v3d)
341 {
342 - if (v3d->ver < 41) {
343 + if (v3d->ver < V3D_GEN_41) {
344 u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
345
346 V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
347 gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
348
349 - if (v3d->ver < 33) {
350 + if (v3d->ver < V3D_GEN_33) {
351 V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
352 gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
353 }
354 @@ -198,7 +198,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
355 static void
356 v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
357 {
358 - if (v3d->ver > 32)
359 + if (v3d->ver >= V3D_GEN_33)
360 return;
361
362 V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
363 --- a/drivers/gpu/drm/v3d/v3d_irq.c
364 +++ b/drivers/gpu/drm/v3d/v3d_irq.c
365 @@ -125,8 +125,8 @@ v3d_irq(int irq, void *arg)
366 status = IRQ_HANDLED;
367 }
368
369 - if ((v3d->ver < 71 && (intsts & V3D_INT_CSDDONE)) ||
370 - (v3d->ver >= 71 && (intsts & V3D_V7_INT_CSDDONE))) {
371 + if ((v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_CSDDONE)) ||
372 + (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_INT_CSDDONE))) {
373 struct v3d_fence *fence =
374 to_v3d_fence(v3d->csd_job->base.irq_fence);
375 v3d->gpu_queue_stats[V3D_CSD].last_exec_end = local_clock();
376 @@ -142,7 +142,7 @@ v3d_irq(int irq, void *arg)
377 /* We shouldn't be triggering these if we have GMP in
378 * always-allowed mode.
379 */
380 - if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
381 + if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
382 dev_err(v3d->drm.dev, "GMP violation\n");
383
384 /* V3D 4.2 wires the hub and core IRQs together, so if we &
385 @@ -200,7 +200,7 @@ v3d_hub_irq(int irq, void *arg)
386
387 V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
388
389 - if (v3d->ver >= 41) {
390 + if (v3d->ver >= V3D_GEN_41) {
391 axi_id = axi_id >> 5;
392 if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
393 client = v3d41_axi_ids[axi_id];
394 @@ -219,7 +219,7 @@ v3d_hub_irq(int irq, void *arg)
395 status = IRQ_HANDLED;
396 }
397
398 - if (v3d->ver >= 71 && intsts & V3D_V7_HUB_INT_GMPV) {
399 + if (v3d->ver >= V3D_GEN_71 && intsts & V3D_V7_HUB_INT_GMPV) {
400 dev_err(v3d->drm.dev, "GMP Violation\n");
401 status = IRQ_HANDLED;
402 }
403 --- a/drivers/gpu/drm/v3d/v3d_sched.c
404 +++ b/drivers/gpu/drm/v3d/v3d_sched.c
405 @@ -288,7 +288,7 @@ static struct dma_fence *v3d_render_job_
406 return fence;
407 }
408
409 -#define V3D_TFU_REG(name) ((v3d->ver < 71) ? V3D_TFU_ ## name : V3D_V7_TFU_ ## name)
410 +#define V3D_TFU_REG(name) ((v3d->ver < V3D_GEN_71) ? V3D_TFU_ ## name : V3D_V7_TFU_ ## name)
411
412 static struct dma_fence *
413 v3d_tfu_job_run(struct drm_sched_job *sched_job)
414 @@ -321,11 +321,11 @@ v3d_tfu_job_run(struct drm_sched_job *sc
415 V3D_WRITE(V3D_TFU_REG(ICA), job->args.ica);
416 V3D_WRITE(V3D_TFU_REG(IUA), job->args.iua);
417 V3D_WRITE(V3D_TFU_REG(IOA), job->args.ioa);
418 - if (v3d->ver >= 71)
419 + if (v3d->ver >= V3D_GEN_71)
420 V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
421 V3D_WRITE(V3D_TFU_REG(IOS), job->args.ios);
422 V3D_WRITE(V3D_TFU_REG(COEF0), job->args.coef[0]);
423 - if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
424 + if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
425 V3D_WRITE(V3D_TFU_REG(COEF1), job->args.coef[1]);
426 V3D_WRITE(V3D_TFU_REG(COEF2), job->args.coef[2]);
427 V3D_WRITE(V3D_TFU_REG(COEF3), job->args.coef[3]);
428 @@ -367,8 +367,8 @@ v3d_csd_job_run(struct drm_sched_job *sc
429 v3d_sched_stats_add_job(&v3d->gpu_queue_stats[V3D_CSD], sched_job);
430 v3d_switch_perfmon(v3d, &job->base);
431
432 - csd_cfg0_reg = v3d->ver < 71 ? V3D_CSD_QUEUED_CFG0 : V3D_V7_CSD_QUEUED_CFG0;
433 - csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
434 + csd_cfg0_reg = v3d->ver < V3D_GEN_71 ? V3D_CSD_QUEUED_CFG0 : V3D_V7_CSD_QUEUED_CFG0;
435 + csd_cfg_reg_count = v3d->ver < V3D_GEN_71 ? 6 : 7;
436 for (i = 1; i <= csd_cfg_reg_count; i++)
437 V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
438 /* CFG0 write kicks off the job. */
439 @@ -475,7 +475,7 @@ v3d_csd_job_timedout(struct drm_sched_jo
440 {
441 struct v3d_csd_job *job = to_csd_job(sched_job);
442 struct v3d_dev *v3d = job->base.v3d;
443 - u32 batches = V3D_CORE_READ(0, (v3d->ver < 71 ? V3D_CSD_CURRENT_CFG4 :
444 + u32 batches = V3D_CORE_READ(0, (v3d->ver < V3D_GEN_71 ? V3D_CSD_CURRENT_CFG4 :
445 V3D_V7_CSD_CURRENT_CFG4));
446
447 /* If we've made progress, skip reset and let the timer get