/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_trace.h"
#include "intel_sideband.h"
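
/* Helper shared by the entries below: map a debugfs info node to its i915. */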
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
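
/*
 * Pretty-print the GTT page sizes used by a binding: the single-size cases
 * return a static string, mixed sizes are built into the caller's buffer.
 */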
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* drop the trailing ", " */
		buf[x - 2] = '\0';
		return buf;
	}
}
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
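
/*
 * Accumulator used when walking objects per client: object sizes are
 * bucketed into active/inactive/closed based on the state of each vma in
 * stats->vm (or of all GGTT vmas when no vm is given).
 */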
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total;
	u64 active, inactive;
	u64 closed;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if ((stats).count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
			   name, \
			   (stats).count, \
			   (stats).total, \
			   (stats).active, \
			   (stats).inactive, \
			   (stats).closed); \
} while (0)
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);

				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);

	print_context_stats(m, i915);

	return 0;
}
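
/*
 * Per-pipe display interrupt registers are only accessible while the pipe's
 * power well is enabled, hence the display power wakeref dance below.
 */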
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif
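
/*
 * Frequency/RPS reporting: the register layout differs per generation
 * (ILK, VLV/CHV punit and GEN6+ RPS), so each branch below decodes its own
 * set of registers.
 */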
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
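
/*
 * The ring/IA frequency table is read from the pcode mailbox; it is only
 * meaningful on LLC platforms.
 */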
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');

				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
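
/*
 * Bits accepted by the i915_gem_drop_caches debugfs file; DROP_ALL is the
 * union of everything below.
 */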
#define DROP_UNBOUND		BIT(0)
#define DROP_BOUND		BIT(1)
#define DROP_RETIRE		BIT(2)
#define DROP_ACTIVE		BIT(3)
#define DROP_FREED		BIT(4)
#define DROP_SHRINK_ALL		BIT(5)
#define DROP_IDLE		BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU		BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
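
/*
 * The helpers below sample the hardware power-gating ACK registers to build
 * a snapshot of which slices/subslices/EUs are currently powered up.
 */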
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, " %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, " %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, " %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, " %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, " %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, " %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, " Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, " Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, " Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
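
/* Tables of read-only info nodes and read/write control files to register. */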
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}