/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
};
#undef PLATFORM_NAME
const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}
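/*
 * Usage sketch (illustrative, not taken from this file): callers typically
 * feed the result straight into a log message, e.g.
 *
 *	DRM_DEBUG_DRIVER("platform: %s\n", intel_platform_name(INTEL_SKYLAKE));
 *
 * which prints "platform: SKYLAKE"; an out-of-range or unnamed platform falls
 * back to the placeholder string returned above.
 */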
void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}
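/*
 * Worked example (illustrative values only): with eu_mask[] holding the two
 * bytes 0xff and 0x7f and every other entry zero, the loop above sums
 * hweight8(0xff) + hweight8(0x7f) = 8 + 7 = 15 enabled EUs in total.
 */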
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s, ss;

	sseu->max_slices = 1;
	sseu->max_subslices = 8;
	sseu->max_eus_per_subslice = 8;

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
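/*
 * Worked example (illustrative fuse values, not any real SKU): if
 * GEN11_GT_SUBSLICE_DISABLE reads back as 0xffffff00, then
 * ss_en = ~0xffffff00 = 0x000000ff and, with ss_en_mask = BIT(8) - 1 = 0xff,
 * slice 0 ends up with subslice_mask[0] = 0xff, i.e. all eight subslices
 * enabled; a readback of 0xffffff0f would instead leave only subslices 4-7.
 */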
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			   GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/* Do a second pass where we mark the subslices disabled if all their
	 * EUs are disabled.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
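/*
 * Decode sketch (assumed fuse layout, mirroring the shifts above): the
 * EU-disable fuses are packed one byte per subslice across the 32-bit
 * GEN8_EU_DISABLE0/1/2 and GEN10_EU_DISABLE3 registers, so e.g. slice2
 * subslice1 is taken from bits 23:16 of GEN8_EU_DISABLE1. If that register
 * read back as 0x00f00000 (illustrative value), slice2/subslice1 would lose
 * EUs 4-7 and keep EUs 0-3 after the ~ inversion and 0xff masking.
 */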
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
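/*
 * Assembly sketch (illustrative fuse fields): the per-subslice EU-disable
 * bits live in two 4-bit fields of CHV_FUSE_GT, so the decode above stitches
 * "row 1" on top of "row 0". If the R0 field decoded to 0x3 and the R1 field
 * to 0x1, disabled_mask would be 0x3 | (0x1 << 4) = 0x13, and ~disabled_mask
 * (truncated to the 8 EU bits) would leave EUs 2, 3, 5, 6 and 7 enabled.
 */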
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				    hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
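/*
 * Worked example (illustrative BXT fuse-out): with subslice_mask[0] = 0x7 all
 * three subslices are present, so has_pooled_eu is set and, since none of
 * subslices 0-2 is disabled, min_eu_in_pool ends up as 9. With only two
 * subslices enabled, hweight8() is 2, has_pooled_eu stays false and
 * min_eu_in_pool remains 0.
 */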
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
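/*
 * Stitching sketch (assumed fuse layout, matching the shifts above): slice1's
 * EU-disable bits straddle GEN8_EU_DISABLE0 and GEN8_EU_DISABLE1, so the low
 * (32 - GEN8_EU_DIS0_S1_SHIFT) bits of eu_disable[1] come from the top of
 * DISABLE0 and the remainder from the bottom of DISABLE1, shifted back into
 * place. The per-subslice byte is then peeled off in the loop with
 * ">> (ss * sseu->max_eus_per_subslice)", i.e. 8 bits per subslice.
 */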
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
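/*
 * Example (hypothetical GT2 part): gt == 2 selects one slice with two
 * subslices; if the PAVP fuse reports HSW_F1_EU_DIS_10EUS, every subslice is
 * seeded with the full (1UL << 10) - 1 EU mask, giving eu_total = 20 once
 * compute_eu_total() sums the two subslices.
 */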
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
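/*
 * Worked example (illustrative override value, units are kHz as elsewhere in
 * this file): a divider field of 23 yields base_freq = (23 + 1) * 1000 =
 * 24000, a denominator field of 1 yields frac_freq = 1000 / (1 + 1) = 500,
 * so the reported reference frequency would be 24500 kHz.
 */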
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}
static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								    rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								    rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");

	return 0;
}
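/*
 * Worked example (illustrative CTC_MODE contents): on a hypothetical non-LP
 * gen9 part using the crystal source with a shift parameter of 1, the code
 * above starts from f24_mhz = 24000 kHz and applies freq >>= 3 - 1, i.e.
 * divides by 4, reporting a CS timestamp frequency of 6000 kHz.
 */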
/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));

	if (IS_GEN(dev_priv, 11))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}
	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
					       GEN11_GT_VEBOX_DISABLE_SHIFT;

	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
			info->ring_mask &= ~ENGINE_MASK(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 */
		if (logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}

	DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
			info->ring_mask &= ~ENGINE_MASK(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
}