/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
7 #include <linux/nospec.h>
10 #include "i915_perf.h"
11 #include "i915_query.h"
12 #include "gt/intel_engine_user.h"
13 #include <uapi/drm/i915_drm.h>
15 static int copy_query_item(void *query_hdr
, size_t query_sz
,
17 struct drm_i915_query_item
*query_item
)
19 if (query_item
->length
== 0)
22 if (query_item
->length
< total_length
)
25 if (copy_from_user(query_hdr
, u64_to_user_ptr(query_item
->data_ptr
),
32 static int fill_topology_info(const struct sseu_dev_info
*sseu
,
33 struct drm_i915_query_item
*query_item
,
34 intel_sseu_ss_mask_t subslice_mask
)
36 struct drm_i915_query_topology_info topo
;
37 u32 slice_length
, subslice_length
, eu_length
, total_length
;
38 int ss_stride
= GEN_SSEU_STRIDE(sseu
->max_subslices
);
39 int eu_stride
= GEN_SSEU_STRIDE(sseu
->max_eus_per_subslice
);
42 BUILD_BUG_ON(sizeof(u8
) != sizeof(sseu
->slice_mask
));
44 if (sseu
->max_slices
== 0)
47 slice_length
= sizeof(sseu
->slice_mask
);
48 subslice_length
= sseu
->max_slices
* ss_stride
;
49 eu_length
= sseu
->max_slices
* sseu
->max_subslices
* eu_stride
;
50 total_length
= sizeof(topo
) + slice_length
+ subslice_length
+
53 ret
= copy_query_item(&topo
, sizeof(topo
), total_length
, query_item
);
58 memset(&topo
, 0, sizeof(topo
));
59 topo
.max_slices
= sseu
->max_slices
;
60 topo
.max_subslices
= sseu
->max_subslices
;
61 topo
.max_eus_per_subslice
= sseu
->max_eus_per_subslice
;
63 topo
.subslice_offset
= slice_length
;
64 topo
.subslice_stride
= ss_stride
;
65 topo
.eu_offset
= slice_length
+ subslice_length
;
66 topo
.eu_stride
= eu_stride
;
68 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
),
72 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+ sizeof(topo
)),
73 &sseu
->slice_mask
, slice_length
))
76 if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item
->data_ptr
+
77 sizeof(topo
) + slice_length
),
81 if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item
->data_ptr
+
83 slice_length
+ subslice_length
),
90 static int query_topology_info(struct drm_i915_private
*dev_priv
,
91 struct drm_i915_query_item
*query_item
)
93 const struct sseu_dev_info
*sseu
= &to_gt(dev_priv
)->info
.sseu
;
95 if (query_item
->flags
!= 0)
98 return fill_topology_info(sseu
, query_item
, sseu
->subslice_mask
);
101 static int query_geometry_subslices(struct drm_i915_private
*i915
,
102 struct drm_i915_query_item
*query_item
)
104 const struct sseu_dev_info
*sseu
;
105 struct intel_engine_cs
*engine
;
106 struct i915_engine_class_instance classinstance
;
108 if (GRAPHICS_VER_FULL(i915
) < IP_VER(12, 50))
111 classinstance
= *((struct i915_engine_class_instance
*)&query_item
->flags
);
113 engine
= intel_engine_lookup_user(i915
, (u8
)classinstance
.engine_class
,
114 (u8
)classinstance
.engine_instance
);
119 if (engine
->class != RENDER_CLASS
)
122 sseu
= &engine
->gt
->info
.sseu
;
124 return fill_topology_info(sseu
, query_item
, sseu
->geometry_subslice_mask
);
128 query_engine_info(struct drm_i915_private
*i915
,
129 struct drm_i915_query_item
*query_item
)
131 struct drm_i915_query_engine_info __user
*query_ptr
=
132 u64_to_user_ptr(query_item
->data_ptr
);
133 struct drm_i915_engine_info __user
*info_ptr
;
134 struct drm_i915_query_engine_info query
;
135 struct drm_i915_engine_info info
= { };
136 unsigned int num_uabi_engines
= 0;
137 struct intel_engine_cs
*engine
;
140 if (query_item
->flags
)
143 for_each_uabi_engine(engine
, i915
)
146 len
= struct_size(query_ptr
, engines
, num_uabi_engines
);
148 ret
= copy_query_item(&query
, sizeof(query
), len
, query_item
);
152 if (query
.num_engines
|| query
.rsvd
[0] || query
.rsvd
[1] ||
156 info_ptr
= &query_ptr
->engines
[0];
158 for_each_uabi_engine(engine
, i915
) {
159 info
.engine
.engine_class
= engine
->uabi_class
;
160 info
.engine
.engine_instance
= engine
->uabi_instance
;
161 info
.flags
= I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE
;
162 info
.capabilities
= engine
->uabi_capabilities
;
163 info
.logical_instance
= ilog2(engine
->logical_mask
);
165 if (copy_to_user(info_ptr
, &info
, sizeof(info
)))
172 if (copy_to_user(query_ptr
, &query
, sizeof(query
)))
178 static int can_copy_perf_config_registers_or_number(u32 user_n_regs
,
183 * We'll just put the number of registers, and won't copy the
186 if (user_n_regs
== 0)
189 if (user_n_regs
< kernel_n_regs
)
195 static int copy_perf_config_registers_or_number(const struct i915_oa_reg
*kernel_regs
,
200 u32 __user
*p
= u64_to_user_ptr(user_regs_ptr
);
203 if (*user_n_regs
== 0) {
204 *user_n_regs
= kernel_n_regs
;
208 *user_n_regs
= kernel_n_regs
;
210 if (!user_write_access_begin(p
, 2 * sizeof(u32
) * kernel_n_regs
))
213 for (r
= 0; r
< kernel_n_regs
; r
++, p
+= 2) {
214 unsafe_put_user(i915_mmio_reg_offset(kernel_regs
[r
].addr
),
216 unsafe_put_user(kernel_regs
[r
].value
, p
+ 1, Efault
);
218 user_write_access_end();
221 user_write_access_end();
225 static int query_perf_config_data(struct drm_i915_private
*i915
,
226 struct drm_i915_query_item
*query_item
,
229 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
230 u64_to_user_ptr(query_item
->data_ptr
);
231 struct drm_i915_perf_oa_config __user
*user_config_ptr
=
232 u64_to_user_ptr(query_item
->data_ptr
+
233 sizeof(struct drm_i915_query_perf_config
));
234 struct drm_i915_perf_oa_config user_config
;
235 struct i915_perf
*perf
= &i915
->perf
;
236 struct i915_oa_config
*oa_config
;
237 char uuid
[UUID_STRING_LEN
+ 1];
239 u32 flags
, total_size
;
246 sizeof(struct drm_i915_query_perf_config
) +
247 sizeof(struct drm_i915_perf_oa_config
);
249 if (query_item
->length
== 0)
252 if (query_item
->length
< total_size
) {
254 "Invalid query config data item size=%u expected=%u\n",
255 query_item
->length
, total_size
);
259 if (get_user(flags
, &user_query_config_ptr
->flags
))
266 struct i915_oa_config
*tmp
;
269 BUILD_BUG_ON(sizeof(user_query_config_ptr
->uuid
) >= sizeof(uuid
));
271 memset(&uuid
, 0, sizeof(uuid
));
272 if (copy_from_user(uuid
, user_query_config_ptr
->uuid
,
273 sizeof(user_query_config_ptr
->uuid
)))
278 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
279 if (!strcmp(tmp
->uuid
, uuid
)) {
280 oa_config
= i915_oa_config_get(tmp
);
286 if (get_user(config_id
, &user_query_config_ptr
->config
))
289 oa_config
= i915_perf_get_oa_config(perf
, config_id
);
294 if (copy_from_user(&user_config
, user_config_ptr
, sizeof(user_config
))) {
299 ret
= can_copy_perf_config_registers_or_number(user_config
.n_boolean_regs
,
300 user_config
.boolean_regs_ptr
,
301 oa_config
->b_counter_regs_len
);
305 ret
= can_copy_perf_config_registers_or_number(user_config
.n_flex_regs
,
306 user_config
.flex_regs_ptr
,
307 oa_config
->flex_regs_len
);
311 ret
= can_copy_perf_config_registers_or_number(user_config
.n_mux_regs
,
312 user_config
.mux_regs_ptr
,
313 oa_config
->mux_regs_len
);
317 ret
= copy_perf_config_registers_or_number(oa_config
->b_counter_regs
,
318 oa_config
->b_counter_regs_len
,
319 user_config
.boolean_regs_ptr
,
320 &user_config
.n_boolean_regs
);
324 ret
= copy_perf_config_registers_or_number(oa_config
->flex_regs
,
325 oa_config
->flex_regs_len
,
326 user_config
.flex_regs_ptr
,
327 &user_config
.n_flex_regs
);
331 ret
= copy_perf_config_registers_or_number(oa_config
->mux_regs
,
332 oa_config
->mux_regs_len
,
333 user_config
.mux_regs_ptr
,
334 &user_config
.n_mux_regs
);
338 memcpy(user_config
.uuid
, oa_config
->uuid
, sizeof(user_config
.uuid
));
340 if (copy_to_user(user_config_ptr
, &user_config
, sizeof(user_config
))) {
348 i915_oa_config_put(oa_config
);
352 static size_t sizeof_perf_config_list(size_t count
)
354 return sizeof(struct drm_i915_query_perf_config
) + sizeof(u64
) * count
;
357 static size_t sizeof_perf_metrics(struct i915_perf
*perf
)
359 struct i915_oa_config
*tmp
;
365 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
)
369 return sizeof_perf_config_list(i
);
372 static int query_perf_config_list(struct drm_i915_private
*i915
,
373 struct drm_i915_query_item
*query_item
)
375 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
376 u64_to_user_ptr(query_item
->data_ptr
);
377 struct i915_perf
*perf
= &i915
->perf
;
378 u64
*oa_config_ids
= NULL
;
379 int alloc
, n_configs
;
386 if (query_item
->length
== 0)
387 return sizeof_perf_metrics(perf
);
389 if (get_user(flags
, &user_query_config_ptr
->flags
))
397 struct i915_oa_config
*tmp
;
401 ids
= krealloc(oa_config_ids
,
402 n_configs
* sizeof(*oa_config_ids
),
407 alloc
= fetch_and_zero(&n_configs
);
409 ids
[n_configs
++] = 1ull; /* reserved for test_config */
411 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
412 if (n_configs
< alloc
)
419 } while (n_configs
> alloc
);
421 if (query_item
->length
< sizeof_perf_config_list(n_configs
)) {
423 "Invalid query config list item size=%u expected=%zu\n",
425 sizeof_perf_config_list(n_configs
));
426 kfree(oa_config_ids
);
430 if (put_user(n_configs
, &user_query_config_ptr
->config
)) {
431 kfree(oa_config_ids
);
435 ret
= copy_to_user(user_query_config_ptr
+ 1,
437 n_configs
* sizeof(*oa_config_ids
));
438 kfree(oa_config_ids
);
442 return sizeof_perf_config_list(n_configs
);
445 static int query_perf_config(struct drm_i915_private
*i915
,
446 struct drm_i915_query_item
*query_item
)
448 switch (query_item
->flags
) {
449 case DRM_I915_QUERY_PERF_CONFIG_LIST
:
450 return query_perf_config_list(i915
, query_item
);
451 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
:
452 return query_perf_config_data(i915
, query_item
, true);
453 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
:
454 return query_perf_config_data(i915
, query_item
, false);
460 static int query_memregion_info(struct drm_i915_private
*i915
,
461 struct drm_i915_query_item
*query_item
)
463 struct drm_i915_query_memory_regions __user
*query_ptr
=
464 u64_to_user_ptr(query_item
->data_ptr
);
465 struct drm_i915_memory_region_info __user
*info_ptr
=
466 &query_ptr
->regions
[0];
467 struct drm_i915_memory_region_info info
= { };
468 struct drm_i915_query_memory_regions query
;
469 struct intel_memory_region
*mr
;
473 if (query_item
->flags
!= 0)
476 total_length
= sizeof(query
);
477 for_each_memory_region(mr
, i915
, id
) {
481 total_length
+= sizeof(info
);
484 ret
= copy_query_item(&query
, sizeof(query
), total_length
, query_item
);
488 if (query
.num_regions
)
491 for (i
= 0; i
< ARRAY_SIZE(query
.rsvd
); i
++) {
496 for_each_memory_region(mr
, i915
, id
) {
500 info
.region
.memory_class
= mr
->type
;
501 info
.region
.memory_instance
= mr
->instance
;
502 info
.probed_size
= mr
->total
;
504 if (mr
->type
== INTEL_MEMORY_LOCAL
)
505 info
.probed_cpu_visible_size
= resource_size(&mr
->io
);
507 info
.probed_cpu_visible_size
= mr
->total
;
509 if (perfmon_capable()) {
510 intel_memory_region_avail(mr
,
511 &info
.unallocated_size
,
512 &info
.unallocated_cpu_visible_size
);
514 info
.unallocated_size
= info
.probed_size
;
515 info
.unallocated_cpu_visible_size
=
516 info
.probed_cpu_visible_size
;
519 if (__copy_to_user(info_ptr
, &info
, sizeof(info
)))
526 if (__copy_to_user(query_ptr
, &query
, sizeof(query
)))
532 static int query_hwconfig_blob(struct drm_i915_private
*i915
,
533 struct drm_i915_query_item
*query_item
)
535 struct intel_gt
*gt
= to_gt(i915
);
536 struct intel_hwconfig
*hwconfig
= >
->info
.hwconfig
;
538 if (!hwconfig
->size
|| !hwconfig
->ptr
)
541 if (query_item
->length
== 0)
542 return hwconfig
->size
;
544 if (query_item
->length
< hwconfig
->size
)
547 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
),
548 hwconfig
->ptr
, hwconfig
->size
))
551 return hwconfig
->size
;
555 query_guc_submission_version(struct drm_i915_private
*i915
,
556 struct drm_i915_query_item
*query
)
558 struct drm_i915_query_guc_submission_version __user
*query_ptr
=
559 u64_to_user_ptr(query
->data_ptr
);
560 struct drm_i915_query_guc_submission_version ver
;
561 struct intel_guc
*guc
= &to_gt(i915
)->uc
.guc
;
562 const size_t size
= sizeof(ver
);
565 if (!intel_uc_uses_guc_submission(&to_gt(i915
)->uc
))
568 ret
= copy_query_item(&ver
, size
, size
, query
);
572 if (ver
.branch
|| ver
.major
|| ver
.minor
|| ver
.patch
)
576 ver
.major
= guc
->submission_version
.major
;
577 ver
.minor
= guc
->submission_version
.minor
;
578 ver
.patch
= guc
->submission_version
.patch
;
580 if (copy_to_user(query_ptr
, &ver
, size
))
586 static int (* const i915_query_funcs
[])(struct drm_i915_private
*dev_priv
,
587 struct drm_i915_query_item
*query_item
) = {
591 query_memregion_info
,
593 query_geometry_subslices
,
594 query_guc_submission_version
,
597 int i915_query_ioctl(struct drm_device
*dev
, void *data
, struct drm_file
*file
)
599 struct drm_i915_private
*dev_priv
= to_i915(dev
);
600 struct drm_i915_query
*args
= data
;
601 struct drm_i915_query_item __user
*user_item_ptr
=
602 u64_to_user_ptr(args
->items_ptr
);
605 if (args
->flags
!= 0)
608 for (i
= 0; i
< args
->num_items
; i
++, user_item_ptr
++) {
609 struct drm_i915_query_item item
;
610 unsigned long func_idx
;
613 if (copy_from_user(&item
, user_item_ptr
, sizeof(item
)))
616 if (item
.query_id
== 0)
619 if (overflows_type(item
.query_id
- 1, unsigned long))
622 func_idx
= item
.query_id
- 1;
625 if (func_idx
< ARRAY_SIZE(i915_query_funcs
)) {
626 func_idx
= array_index_nospec(func_idx
,
627 ARRAY_SIZE(i915_query_funcs
));
628 ret
= i915_query_funcs
[func_idx
](dev_priv
, &item
);
631 /* Only write the length back to userspace if they differ. */
632 if (ret
!= item
.length
&& put_user(ret
, &user_item_ptr
->length
))