/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */
/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
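
/*
 * As a rough usage sketch (illustrative only, not part of this file; error
 * handling is omitted and metrics_set_id is a placeholder that would normally
 * be read from sysfs), userspace opens a stream with DRM_IOCTL_I915_PERF_OPEN
 * and then read()s drm_i915_perf_record_header framed records from it:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, true,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / 16,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 *	read(stream_fd, buf, sizeof(buf));
 */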
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */
#include <linux/anon_inodes.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_rc6.h"
#include "gt/intel_ring.h"
#include "gt/uc/intel_guc_slpc.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
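
/*
 * A worked example of the modular arithmetic above (illustrative only): with
 * the 16M buffer the mask is 0xffffff, so if the tail has wrapped around to
 * 0x40 while the head is still at 0xffffc0, then
 * OA_TAKEN(0x40, 0xffffc0) == (0x40 - 0xffffc0) & 0xffffff == 0x80
 * bytes taken, i.e. the wrap-around is handled without any branching.
 */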
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
 * redundant read() attempts.
 *
 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
 * in the OA buffer, starting from the tail reported by the HW until we find a
 * report with its first 2 dwords not 0 meaning its previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
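
/* i.e. by default the poll hrtimer fires every NSEC_PER_SEC / 200 = 5ms */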

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
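
/*
 * A worked example (illustrative; the 12.5MHz timestamp frequency assumed
 * here is the Haswell value implied by the "6.25MHz" comment below): an
 * exponent E selects timestamp bit E, giving a sampling period of
 * 2^(E + 1) timestamp ticks:
 *
 *	period_ns = 2^(E + 1) * NSEC_PER_SEC / timestamp_frequency
 *
 * so E = 0 gives 2 ticks / 12.5MHz = 160ns (the fastest rate mentioned
 * below), while E = 63 would give 2^64 ticks / 12.5MHz ~= 1.5e12 seconds,
 * i.e. the "47 thousand years" above.
 */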

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_A24u40_A14u32_B8_C8] = { 5, 256 },
	[I915_OAM_FORMAT_MPEC8u64_B8_C8]    = { 1, 192, TYPE_OAM, HDR_64_BIT },
	[I915_OAM_FORMAT_MPEC8u32_B8_C8]    = { 2, 128, TYPE_OAM, HDR_64_BIT },
};

static const u32 mtl_oa_base[] = {
	[PERF_GROUP_OAM_SAMEDIA_0] = 0x393000,
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *	  specified configuration in the opening parameters or a default value
 *	  (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 * data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream)
{
	return &stream->engine->oa_group->regs;
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

#define oa_report_header_64bit(__s) \
	((__s)->oa_buffer.format->header == HDR_64_BIT)

static u64 oa_report_id(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
}

static u64 oa_report_reason(struct i915_perf_stream *stream, void *report)
{
	return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) &
	       (GRAPHICS_VER(stream->perf->i915) == 12 ?
		OAREPORT_REASON_MASK_EXTENDED :
		OAREPORT_REASON_MASK);
}

static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)report = 0;
	else
		*report = 0;
}

static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
	return !(oa_report_id(stream, report) &
		 stream->perf->gen8_valid_ctx_bit);
}

static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ?
	       *((u64 *)report + 1) :
	       *((u32 *)report + 1);
}

static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)&report[2] = 0;
	else
		report[1] = 0;
}

static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report)
{
	u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2];

	return ctx_id & stream->specific_ctx_id_mask;
}

static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		report[4] = INVALID_CTX_ID;
	else
		report[2] = INVALID_CTX_ID;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail in the oa_buffer object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format->size;
	u32 tail, hw_tail;
	unsigned long flags;
	bool pollin;
	u32 partial_report_size;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
	hw_tail -= gtt_offset;

	/* The tail pointer increases in 64 byte increments, not in report_size
	 * steps. Also the report size may not be a power of 2. Compute
	 * potentially partially landed report in the OA buffer.
	 */
	partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail);
	partial_report_size %= report_size;

	/* Subtract partial amount off the tail */
	hw_tail = OA_TAKEN(hw_tail, partial_report_size);

	tail = hw_tail;

	/* Walk the stream backward until we find a report with report
	 * id and timestamp not at 0. Since the circular buffer pointers
	 * progress by increments of 64 bytes and that reports can be up
	 * to 256 bytes long, we can't tell whether a report has fully
	 * landed in memory before the report id and timestamp of the
	 * following report have effectively landed.
	 *
	 * This is assuming that the writes of the OA unit land in
	 * memory in the order they were written to.
	 * If not : (╯°□°)╯︵ ┻━┻
	 */
	while (OA_TAKEN(tail, stream->oa_buffer.tail) >= report_size) {
		void *report = stream->oa_buffer.vaddr + tail;

		if (oa_report_id(stream, report) ||
		    oa_timestamp(stream, report))
			break;

		tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
	}

	if (OA_TAKEN(hw_tail, tail) > report_size &&
	    __ratelimit(&stream->perf->tail_pointer_race))
		drm_notice(&stream->uncore->i915->drm,
			   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
			   stream->oa_buffer.head, tail, hw_tail);

	stream->oa_buffer.tail = tail;

	pollin = OA_TAKEN(stream->oa_buffer.tail,
			  stream->oa_buffer.head) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	struct drm_i915_perf_record_header header;
	int report_size_partial;
	u8 *oa_buf_end;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
	report_size_partial = oa_buf_end - report;

	if (report_size_partial < report_size) {
		if (copy_to_user(buf, report, report_size_partial))
			return -EFAULT;
		buf += report_size_partial;

		if (copy_to_user(buf, stream->oa_buffer.vaddr,
				 report_size - report_size_partial))
			return -EFAULT;
	} else if (copy_to_user(buf, report, report_size)) {
		return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
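
/*
 * An illustrative sketch (not an ABI statement) of the read() stream built
 * from the above with only SAMPLE_OA_REPORT requested, where each record is
 * header.size bytes long:
 *
 *	struct drm_i915_perf_record_header {
 *		.type = DRM_I915_PERF_RECORD_SAMPLE,
 *		.pad  = 0,
 *		.size = sizeof(header) + report_size,
 *	};
 *	u8 report[report_size];  <- the raw OA report as written by the HW
 *	... next record ...
 */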

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size.
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE ||
			  tail > OA_BUFFER_SIZE,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u64 reason;

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 */
		reason = oa_report_reason(stream, report);
		ctx_id = oa_context_id(stream, report32);

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 *
		 * Update:
		 *
		 * On XEHP platforms the behavior of context id valid bit has
		 * changed compared to prior platforms. To describe this, we
		 * define a few terms:
		 *
		 * context-switch-report: This is a report with the reason type
		 * being context-switch. It is generated when a context switches
		 * out.
		 *
		 * context-valid-bit: A bit that is set in the report ID field
		 * to indicate that a valid context has been loaded.
		 *
		 * gpu-idle: A condition characterized by a
		 * context-switch-report with context-valid-bit set to 0.
		 *
		 * On prior platforms, context-id-valid bit is set to 0 only
		 * when GPU goes idle. In all other reports, it is set to 1.
		 *
		 * On XEHP platforms, context-valid-bit is set to 1 in a context
		 * switch report if a new context switched in. For all other
		 * reports it is set to 0.
		 *
		 * This change in behavior causes an issue with MMIO triggered
		 * reports. MMIO triggered reports have the markers in the
		 * context ID field and the context-valid-bit is 0. The logic
		 * below to squash the context ID would render the report
		 * useless since the user will not be able to find it in the OA
		 * buffer. Since MMIO triggered reports exist only on XEHP,
		 * we should avoid squashing these for XEHP platforms.
		 */
		if (oa_report_ctx_invalid(stream, report) &&
		    GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
			ctx_id = INVALID_CTX_ID;
			oa_context_id_squash(stream, report32);
		}

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				oa_context_id_squash(stream, report32);
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		if (is_power_of_2(report_size)) {
			/*
			 * Clear out the report id and timestamp as a means
			 * to detect unlanded reports.
			 */
			oa_report_id_clear(stream, report32);
			oa_timestamp_clear(stream, report32);
		} else {
			u8 *oa_buf_end = stream->oa_buffer.vaddr +
					 OA_BUFFER_SIZE;
			u32 part = oa_buf_end - (u8 *)report32;

			/* Zero out the entire report */
			if (report_size <= part) {
				memset(report32, 0, report_size);
			} else {
				memset(report32, 0, part);
				memset(oa_buf_base, 0, report_size - part);
			}
		}
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    __oa_regs(stream)->oa_head_ptr :
			    GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		intel_uncore_write(uncore, oaheadptr,
				   (head + gtt_offset) & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       __oa_regs(stream)->oa_status :
		       GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				drm_notice(&uncore->i915->drm,
					   "Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   ((head + gtt_offset) & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

static int
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
{
	u32 *cs, cmd;

	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(rq->i915) >= 8)
		cmd++;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(reg);
	*cs++ = ggtt_offset;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);

	err = __store_reg_to_mem(rq, reg, ggtt_offset);

	i915_request_add(rq);
	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

static int
gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
{
	struct i915_vma *scratch;
	u32 *val;
	int err;

	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	err = i915_vma_sync(scratch);
	if (err)
		goto err_scratch;

	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
			 i915_ggtt_offset(scratch));
	if (err)
		goto err_scratch;

	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
	if (IS_ERR(val)) {
		err = PTR_ERR(val);
		goto err_scratch;
	}

	*ctx_id = *val;
	i915_gem_object_unpin_map(scratch->obj);

err_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

/*
 * For execlist mode of submission, pick an unused context id
 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
 * XXX_MAX_CONTEXT_HW_ID is used by idle context
 *
 * For GuC mode of submission read context id from the upper dword of the
 * EXECLIST_STATUS register. Note that we read this value only once and expect
 * that the value stays fixed for the entire OA use case. There are cases where
 * GuC KMD implementation may deregister a context to reuse its context id, but
 * we prevent that from happening to the OA context by pinning it.
 */
static int gen12_get_render_context_id(struct i915_perf_stream *stream)
{
	u32 ctx_id, mask;
	int ret;

	if (intel_engine_uses_guc(stream->engine)) {
		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
		if (ret)
			return ret;

		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);
	} else {
		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
			 (GEN11_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
			(GEN11_SW_CTX_ID_SHIFT - 32);
	}
	stream->specific_ctx_id = ctx_id & mask;
	stream->specific_ctx_id_mask = mask;

	return 0;
}

static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
{
	u32 idx = *offset;
	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
	bool found = false;

	idx++;
	for (; idx < len; idx += 2) {
		if (state[idx] == reg) {
			found = true;
			break;
		}
	}

	*offset = idx;
	return found;
}

static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
{
	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
	u32 *state = ce->lrc_reg_state;

	if (drm_WARN_ON(&ce->engine->i915->drm, !state))
		return U32_MAX;

	for (offset = 0; offset < len; ) {
		if (IS_MI_LRI_CMD(state[offset])) {
			/*
			 * We expect reg-value pairs in MI_LRI command, so
			 * MI_LRI_LEN() should be even, if not, issue a warning.
			 */
			drm_WARN_ON(&ce->engine->i915->drm,
				    MI_LRI_LEN(state[offset]) & 0x1);

			if (oa_find_reg_in_lri(state, reg, &offset, len))
				break;
		} else {
			offset++;
		}
	}

	return offset < len ? offset : U32_MAX;
}

static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
{
	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
	struct i915_perf *perf = &ce->engine->i915->perf;
	u32 offset = perf->ctx_oactxctrl_offset;

	/* Do this only once. Failure is stored as offset of U32_MAX */
	if (offset)
		goto exit;

	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
	perf->ctx_oactxctrl_offset = offset;

	drm_dbg(&ce->engine->i915->drm,
		"%s oa ctx control at 0x%08x dword offset\n",
		ce->engine->name, offset);

exit:
	return offset && offset != U32_MAX ? 0 : -ENODEV;
}

static bool engine_supports_mi_query(struct intel_engine_cs *engine)
{
	return engine->class == RENDER_CLASS;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;
	int ret = 0;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (engine_supports_mi_query(stream->engine) &&
	    HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
		/*
		 * We are enabling perf query here. If we don't find the context
		 * offset here, just return an error.
		 */
		ret = set_oa_ctx_ctrl_offset(ce);
		if (ret) {
			intel_context_unpin(ce);
			drm_err(&stream->perf->i915->drm,
				"Enabling perf query failed for %s\n",
				stream->engine->name);
			return ret;
		}
	}

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		ret = gen12_get_render_context_id(stream);
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return ret;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static bool engine_supports_oa(const struct intel_engine_cs *engine)
{
	return engine->oa_group;
}

static bool engine_supports_oa_format(struct intel_engine_cs *engine, int type)
{
	return engine->oa_group && engine->oa_group->type == type;
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;
	struct intel_gt *gt = stream->engine->gt;
	struct i915_perf_group *g = stream->engine->oa_group;

	if (WARN_ON(stream != g->exclusive_stream))
		return;

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(g->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
			  perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = 0;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tail = 0;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = 0;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tail = 0;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}
static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0);
	intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = 0;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tail = 0;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_gt *gt = stream->engine->gt;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
		return -ENODEV;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	/*
	 * PreHSW required 512K alignment.
	 * HSW and onwards, align to requested size of OA buffer.
	 */
	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
	if (ret) {
		gt_err(gt, "Failed to pin OA buffer %d\n", ret);
		goto err_unref;
	}

	stream->oa_buffer.vma = vma;

	stream->oa_buffer.vaddr =
		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
	if (IS_ERR(stream->oa_buffer.vaddr)) {
		ret = PTR_ERR(stream->oa_buffer.vaddr);
		goto err_unpin;
	}

	return 0;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	stream->oa_buffer.vaddr = NULL;
	stream->oa_buffer.vma = NULL;

	return ret;
}
static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	cmd |= MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
	}

	return cs;
}
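
/*
 * Illustrative sketch (not driver code): saving one 64-bit CS general
 * purpose register with the helper above emits one SRM packet per dword.
 * Using names from alloc_noa_wait() below (CS_GPR()/GPR_SAVE_OFFSET), a
 * call such as
 *
 *	cs = save_restore_register(stream, cs, true /\* save *\/, CS_GPR(0),
 *				   GPR_SAVE_OFFSET, 2);
 *
 * expands to the equivalent of:
 *
 *	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;	(+1 on gen8+)
 *	*cs++ = i915_mmio_reg_offset(CS_GPR(0));		(low dword)
 *	*cs++ = i915_ggtt_offset(stream->noa_wait) + GPR_SAVE_OFFSET;
 *	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 *	*cs++ = i915_mmio_reg_offset(CS_GPR(0)) + 4;		(high dword)
 *	*cs++ = i915_ggtt_offset(stream->noa_wait) + GPR_SAVE_OFFSET + 4;
 */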
static int alloc_noa_wait(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_gt *gt = stream->engine->gt;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	const u64 delay_ticks = 0xffffffffffffffff -
		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
		atomic64_read(&stream->perf->noa_programming_delay));
	const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
	u32 *batch, *ts0, *cs, *jump;
	struct i915_gem_ww_ctx ww;
	int ret, i;
	enum {
		START_TS,
		NOW_TS,
		DELTA_TS,
		JUMP_PREDICATE,
		DELTA_TARGET,
		N_CS_GPR
	};
	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
					  MI_PREDICATE_RESULT_2_ENGINE(base) :
					  MI_PREDICATE_RESULT_1(RENDER_RING_BASE);

	/*
	 * gt->scratch was being used to save/restore the GPR registers, but on
	 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
	 * causes an engine hang. Instead allocate an additional page here to
	 * save/restore GPR registers
	 */
	bo = i915_gem_object_create_internal(i915, 8192);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm,
			"Failed to allocate NOA wait batchbuffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(bo, &ww);
	if (ret)
		goto out_ww;

	/*
	 * We pin in GGTT because we jump into this buffer now because
	 * multiple OA config BOs will have a jump to this address and it
	 * needs to be fixed during the lifetime of the i915/perf stream.
	 */
	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_ww;
	}

	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto out_ww;

	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(batch)) {
		ret = PTR_ERR(batch);
		goto err_unpin;
	}

	stream->noa_wait = vma;

#define GPR_SAVE_OFFSET 4096
#define PREDICATE_SAVE_OFFSET 4160

	/* Save registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, true /* save */, CS_GPR(i),
			GPR_SAVE_OFFSET + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, true /* save */, mi_predicate_result,
		PREDICATE_SAVE_OFFSET, 1);

	/* First timestamp snapshot location. */
	ts0 = cs;

	/*
	 * Initial snapshot of the timestamp register to implement the wait.
	 * We work with 32b values, so clear out the top 32b bits of the
	 * register because the ALU works 64bits.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));

	/*
	 * This is the location we're going to jump back into until the
	 * required amount of time has passed.
	 */
	jump = cs;

	/*
	 * Take another snapshot of the timestamp register. Take care to clear
	 * up the top 32bits of CS_GPR(1) as we're using it for other
	 * operations below.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));

	/*
	 * Do a diff between the 2 timestamps and store the result back into
	 * CS_GPR(1).
	 */
	*cs++ = MI_MATH(5);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
	*cs++ = MI_MATH_SUB;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	/*
	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
	 * timestamp have rolled over the 32bits) into the predicate register
	 * to be used for the predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(mi_predicate_result);

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE | 1;

	/* Restart from the beginning if we had timestamps roll over. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
	*cs++ = 0;

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE;

	/*
	 * Now add the diff between the two previous timestamps to
	 * ((1 << 64) - 1) - delay_ns.
	 *
	 * When the Carry Flag contains 1 this means the elapsed time is
	 * longer than the expected delay, and we can exit the wait loop.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
	*cs++ = lower_32_bits(delay_ticks);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
	*cs++ = upper_32_bits(delay_ticks);

	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	*cs++ = MI_ARB_CHECK;

	/*
	 * Transfer the result into the predicate register to be used for the
	 * predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(mi_predicate_result);

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE | 1;

	/* Predicate the jump. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
	*cs++ = 0;

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE;

	/* Restore registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, false /* restore */, CS_GPR(i),
			GPR_SAVE_OFFSET + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, false /* restore */, mi_predicate_result,
		PREDICATE_SAVE_OFFSET, 1);

	/* And return to the ring. */
	*cs++ = MI_BATCH_BUFFER_END;

	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));

	i915_gem_object_flush_map(bo);
	__i915_gem_object_release_map(bo);

	goto out_ww;

err_unpin:
	i915_vma_unpin_and_release(&vma, 0);
out_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (ret)
		i915_gem_object_put(bo);

	return ret;
}
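
/*
 * Worked example of the wait-loop arithmetic above (illustrative only):
 * delay_ticks is programmed as (2^64 - 1) - delay, so DELTA_TS + delay_ticks
 * overflows 64 bits (setting the ALU carry flag) exactly when
 * DELTA_TS >= delay. E.g. with delay = 100 ticks:
 *
 *	delay_ticks = 0xffffffffffffffff - 100;
 *	DELTA_TS =  99: 99 + delay_ticks = 0xffffffffffffffff -> no carry,
 *	                keep spinning in the loop
 *	DELTA_TS = 100: sum wraps to 0 with carry set -> MI_MATH_STOREINV
 *	                clears the jump predicate and the loop exits
 */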
static u32 *write_cs_mi_lri(u32 *cs,
			    const struct i915_oa_reg *reg_data,
			    u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
			u32 n_lri = min_t(u32,
					  n_regs - i,
					  MI_LOAD_REGISTER_IMM_MAX_REGS);

			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
		}
		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
		*cs++ = reg_data[i].value;
	}

	return cs;
}
static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
		count += num_regs * 2;
	}

	return count;
}
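
/*
 * Worked example (illustrative; assumes MI_LOAD_REGISTER_IMM_MAX_REGS is
 * 126, as defined in intel_gpu_commands.h): for num_regs = 200 the config
 * needs DIV_ROUND_UP(200, 126) = 2 LRI header dwords plus 200 * 2 = 400
 * (offset, value) dwords, i.e. 402 dwords in total.
 */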
static struct i915_oa_config_bo *
alloc_oa_config_buffer(struct i915_perf_stream *stream,
		       struct i915_oa_config *oa_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_oa_config_bo *oa_bo;
	struct i915_gem_ww_ctx ww;
	size_t config_length = 0;
	u32 *cs;
	int err;

	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
	if (!oa_bo)
		return ERR_PTR(-ENOMEM);

	config_length += num_lri_dwords(oa_config->mux_regs_len);
	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
	config_length += num_lri_dwords(oa_config->flex_regs_len);
	config_length += 3; /* MI_BATCH_BUFFER_START */
	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_free;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out_ww;

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_ww;
	}

	cs = write_cs_mi_lri(cs,
			     oa_config->mux_regs,
			     oa_config->mux_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->b_counter_regs,
			     oa_config->b_counter_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->flex_regs,
			     oa_config->flex_regs_len);

	/* Jump into the active wait. */
	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8);
	*cs++ = i915_ggtt_offset(stream->noa_wait);
	*cs++ = 0;

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	oa_bo->vma = i915_vma_instance(obj,
				       &stream->engine->gt->ggtt->vm,
				       NULL);
	if (IS_ERR(oa_bo->vma)) {
		err = PTR_ERR(oa_bo->vma);
		goto out_ww;
	}

	oa_bo->oa_config = i915_oa_config_get(oa_config);
	llist_add(&oa_bo->node, &stream->oa_config_bos);

out_ww:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		i915_gem_object_put(obj);
err_free:
	if (err) {
		kfree(oa_bo);
		return ERR_PTR(err);
	}

	return oa_bo;
}
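
/*
 * Resulting layout of an OA config BO built above (illustrative):
 *
 *	+---------------------------------------+
 *	| MI_LOAD_REGISTER_IMM: mux regs        |
 *	| MI_LOAD_REGISTER_IMM: b_counter regs  |
 *	| MI_LOAD_REGISTER_IMM: flex regs       |
 *	| MI_BATCH_BUFFER_START -> noa_wait     |
 *	+---------------------------------------+
 *
 * Executing the config BO therefore programs the full counter configuration
 * and then falls through into the shared NOA wait batch, giving the NOA
 * logic time to settle before the stream reports are trusted.
 */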
static struct i915_vma *
get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
{
	struct i915_oa_config_bo *oa_bo;

	/*
	 * Look for the buffer in the already allocated BOs attached
	 * to the stream.
	 */
	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
		if (oa_bo->oa_config == oa_config &&
		    memcmp(oa_bo->oa_config->uuid,
			   oa_config->uuid,
			   sizeof(oa_config->uuid)) == 0)
			goto out;
	}

	oa_bo = alloc_oa_config_buffer(stream, oa_config);
	if (IS_ERR(oa_bo))
		return ERR_CAST(oa_bo);

out:
	return i915_vma_get(oa_bo->vma);
}
static int
emit_oa_config(struct i915_perf_stream *stream,
	       struct i915_oa_config *oa_config,
	       struct intel_context *ce,
	       struct i915_active *active)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int err;

	vma = get_oa_vma(stream, oa_config);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto err;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma_unpin;
	}

	if (!IS_ERR_OR_NULL(active)) {
		/* After all individual context modifications */
		err = i915_request_await_active(rq, active,
						I915_ACTIVE_AWAIT_ACTIVE);
		if (err)
			goto err_add_request;

		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto err_add_request;

	err = rq->engine->emit_bb_start(rq,
					i915_vma_offset(vma), 0,
					I915_DISPATCH_SECURE);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
err_vma_unpin:
	i915_vma_unpin(vma);
err:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}

	i915_gem_ww_ctx_fini(&ww);
	i915_vma_put(vma);
	return err;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
{
	return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int
hsw_enable_metric_set(struct i915_perf_stream *stream,
		      struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;

	/*
	 * PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 0, GEN7_DOP_CLOCK_GATE_ENABLE);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
			      i915_reg_t reg)
{
	u32 mmio = i915_mmio_reg_offset(reg);
	int i;

	/*
	 * This arbitrary default will select the 'EU FPU0 Pipeline
	 * Active' event. In the future it's anticipated that there
	 * will be an explicit 'No Event' we can select, but not yet...
	 */
	u32 value = 0;

	for (i = 0; i < oa_config->flex_regs_len; i++) {
		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
			return oa_config->flex_regs[i].value;
	}

	return value;
}
/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void
gen8_update_reg_state_unlocked(const struct intel_context *ce,
			       const struct i915_perf_stream *stream)
{
	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	static const i915_reg_t flex_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	u32 *reg_state = ce->lrc_reg_state;
	int i;

	reg_state[ctx_oactxctrl + 1] =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
		reg_state[ctx_flexeu0 + i * 2 + 1] =
			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
}
static int
gen8_store_flex(struct i915_request *rq,
		struct intel_context *ce,
		const struct flex *flex, unsigned int count)
{
	u32 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
	do {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = offset + flex->offset * sizeof(u32);
		*cs++ = 0;
		*cs++ = flex->value;
	} while (flex++, --count);

	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen8_load_flex(struct i915_request *rq,
	       struct intel_context *ce,
	       const struct flex *flex, unsigned int count)
{
	u32 *cs;

	GEM_BUG_ON(!count || count > 63);

	cs = intel_ring_begin(rq, 2 * count + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*cs++ = i915_mmio_reg_offset(flex->reg);
		*cs++ = flex->value;
	} while (flex++, --count);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}
static int gen8_modify_context(struct intel_context *ce,
			       const struct flex *flex, unsigned int count)
{
	struct i915_request *rq;
	int err;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Serialise with the remote context */
	err = intel_context_prepare_remote_request(ce, rq);
	if (err == 0)
		err = gen8_store_flex(rq, ce, flex, count);

	i915_request_add(rq);
	return err;
}
static int
gen8_modify_self(struct intel_context *ce,
		 const struct flex *flex, unsigned int count,
		 struct i915_active *active)
{
	struct i915_request *rq;
	int err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (!IS_ERR_OR_NULL(active)) {
		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = gen8_load_flex(rq, ce, flex, count);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
	return err;
}
static int gen8_configure_context(struct i915_perf_stream *stream,
				  struct i915_gem_context *ctx,
				  struct flex *flex, unsigned int count)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		GEM_BUG_ON(ce == ce->engine->kernel_context);

		if (ce->engine->class != RENDER_CLASS)
			continue;

		/* Otherwise OA settings will be set upon first use */
		if (!intel_context_pin_if_active(ce))
			continue;

		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
		err = gen8_modify_context(ce, flex, count);

		intel_context_unpin(ce);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}
static int gen12_configure_oar_context(struct i915_perf_stream *stream,
				       struct i915_active *active)
{
	int err;
	struct intel_context *ce = stream->pinned_ctx;
	u32 format = stream->oa_buffer.format->format;
	u32 offset = stream->perf->ctx_oactxctrl_offset;
	struct flex regs_context[] = {
		{
			GEN8_OACTXCONTROL,
			offset + 1,
			active ? GEN8_OA_COUNTER_RESUME : 0,
		},
	};
	/* Offsets in regs_lri are not used since this configuration is only
	 * applied using LRI. Initialize the correct offsets for posterity.
	 */
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
	struct flex regs_lri[] = {
		{
			GEN12_OAR_OACONTROL,
			GEN12_OAR_OACONTROL_OFFSET + 1,
			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
		},
		{
			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
			CTX_CONTEXT_CONTROL,
			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
				      active ?
				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
				      0)
		},
	};

	/* Modify the context image of pinned context with regs_context */
	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	err = gen8_modify_context(ce, regs_context,
				  ARRAY_SIZE(regs_context));
	intel_context_unlock_pinned(ce);
	if (err)
		return err;

	/* Apply regs_lri using LRI with pinned context */
	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}
/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 * Note: the first flex register passed must always be R_PWR_CLK_STATE
 */
static int
oa_configure_all_contexts(struct i915_perf_stream *stream,
			  struct flex *regs,
			  size_t num_regs,
			  struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_engine_cs *engine;
	struct intel_gt *gt = stream->engine->gt;
	struct i915_gem_context *ctx, *cn;
	int err;

	lockdep_assert_held(&gt->perf.lock);

	/*
	 * The OA register config is setup through the context image. This image
	 * might be written to by the GPU on context switch (in particular on
	 * lite-restore). This means we can't safely update a context's image,
	 * if this context is scheduled/submitted to run on the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * Note that since we emit all requests from a single ring, there
	 * is still an implicit global barrier here that may cause a high
	 * priority context to wait for an otherwise independent low priority
	 * context. Contexts idle at the time of reconfiguration are not
	 * trapped behind the barrier.
	 */
	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		err = gen8_configure_context(stream, ctx, regs, num_regs);
		if (err) {
			i915_gem_context_put(ctx);
			return err;
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	/*
	 * After updating all other contexts, we need to modify ourselves.
	 * If we don't modify the kernel_context, we do not get events while
	 * idle.
	 */
	for_each_uabi_engine(engine, i915) {
		struct intel_context *ce = engine->kernel_context;

		if (engine->class != RENDER_CLASS)
			continue;

		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		err = gen8_modify_self(ce, regs, num_regs, active);
		if (err)
			return err;
	}

	return 0;
}
static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
			     const struct i915_oa_config *oa_config,
			     struct i915_active *active)
{
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
			CTX_R_PWR_CLK_STATE,
		},
	};

	if (stream->engine->class != RENDER_CLASS)
		return 0;

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}
static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
			   const struct i915_oa_config *oa_config,
			   struct i915_active *active)
{
	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
			CTX_R_PWR_CLK_STATE,
		},
		{
			GEN8_OACTXCONTROL,
			ctx_oactxctrl + 1,
		},
		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
	};
#undef ctx_flexeuN
	int i;

	regs[1].value =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 2; i < ARRAY_SIZE(regs); i++)
		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}
static int
gen8_enable_metric_set(struct i915_perf_stream *stream,
		       struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed causing a lot of redundant
	 * work to processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering to use that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
		intel_uncore_write(uncore, GEN8_OA_DEBUG,
				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior writing the mux configurations as we need
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = lrc_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
			struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 sqcnt1;
	int ret;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
	 * Disable thread stall DOP gating and EU DOP gating.
	 */
	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
	}

	intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
			   /* Disable clk ratio reports, like previous Gens. */
			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
			   /*
			    * If the user didn't require OA reports, instruct
			    * the hardware not to emit ctx switch reports.
			    */
			   oag_report_ctx_switches(stream));

	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ?
			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
			    : 0);

	/*
	 * Initialize Super Queue Internal Cnt Register
	 * Set PMON Enable in order to collect valid metrics.
	 * Enable bytes per clock reporting in OA for XEHPSDV onward.
	 */
	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);

	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);

	/*
	 * Update all contexts prior writing the mux configurations as we need
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen12_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	/*
	 * For Gen12, performance counters are context
	 * saved/restored. Only enable it for the context that
	 * requested this.
	 */
	if (stream->ctx) {
		ret = gen12_configure_oar_context(stream, active);
		if (ret)
			return ret;
	}

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static void gen11_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}
static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct drm_i915_private *i915 = stream->perf->i915;
	u32 sqcnt1;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * Enable thread stall DOP gating and EU DOP gating.
	 */
	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
	}

	/* Reset all contexts' slices/subslices configurations. */
	gen12_configure_all_contexts(stream, NULL, NULL);

	/* disable the context save/restore or OAR counters */
	if (stream->ctx)
		gen12_configure_oar_context(stream, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);

	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);

	/* Reset PMON Enable to save power. */
	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_gem_context *ctx = stream->ctx;
	u32 ctx_id = stream->specific_ctx_id;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 report_format = stream->oa_buffer.format->format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen7_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format->format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen8_init_oa_buffer(stream);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	intel_uncore_write(uncore, GEN8_OACONTROL,
			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
			   GEN8_OA_COUNTER_ENABLE);
}
static void gen12_oa_enable(struct i915_perf_stream *stream)
{
	const struct i915_perf_regs *regs;
	u32 val;

	/*
	 * If we don't want OA reports from the OA buffer, then we don't even
	 * need to program the OAG unit.
	 */
	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
		return;

	gen12_init_oa_buffer(stream);

	regs = __oa_regs(stream);
	val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) |
	      GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE;

	intel_uncore_write(stream->uncore, regs->oa_ctrl, val);
}
/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	stream->pollin = false;

	stream->perf->ops.oa_enable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_start(&stream->poll_check_timer,
			      ns_to_ktime(stream->poll_oa_period),
			      HRTIMER_MODE_REL_PINNED);
}
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen8_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen12_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0);
	if (intel_wait_for_register(uncore,
				    __oa_regs(stream)->oa_ctrl,
				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");

	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
	if (intel_wait_for_register(uncore,
				    GEN12_OA_TLB_INV_CR,
				    1, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA tlb invalidate timed out\n");
}
/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	stream->perf->ops.oa_disable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_cancel(&stream->poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
	struct i915_active *active;
	int err;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);

	i915_active_put(active);
	return err;
}
static void
get_default_sseu_config(struct intel_sseu *out_sseu,
			struct intel_engine_cs *engine)
{
	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;

	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);

	if (GRAPHICS_VER(engine->i915) == 11) {
		/*
		 * We only need subslice count so it doesn't matter which ones
		 * we select - just turn off low bits in the amount of half of
		 * all available subslices per slice.
		 */
		out_sseu->subslice_mask =
			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
		out_sseu->slice_mask = 0x1;
	}
}
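
/*
 * Illustrative example of the gen11 mask math above: with
 * subslice_mask = 0xff (8 subslices), hweight8() is 8, so
 * ~(~0 << (8 / 2)) = ~(~0 << 4) = 0xf, i.e. half of the subslices
 * remain selected.
 */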
static int
get_sseu_config(struct intel_sseu *out_sseu,
		struct intel_engine_cs *engine,
		const struct drm_i915_gem_context_param_sseu *drm_sseu)
{
	if (drm_sseu->engine.engine_class != engine->uabi_class ||
	    drm_sseu->engine.engine_instance != engine->uabi_instance)
		return -EINVAL;

	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}
/*
 * OA timestamp frequency = CS timestamp frequency in most platforms. On some
 * platforms OA unit ignores the CTC_SHIFT and the 2 timestamps differ. In such
 * cases, return the adjusted CS timestamp frequency to the user.
 */
u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	/* Wa_18013179988 */
	if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
		intel_wakeref_t wakeref;
		u32 reg, shift;

		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);

		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
				      reg);

		return to_gt(i915)->clock_frequency << (3 - shift);
	}

	return to_gt(i915)->clock_frequency;
}
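
/*
 * Worked example for the CTC shift handling above (numbers illustrative
 * only): with a CS clock of 19.2MHz in gt->clock_frequency and a
 * CTC_SHIFT of 1, the OA timestamps effectively tick at
 * 19.2MHz << (3 - 1) = 76.8MHz, which is the frequency reported back to
 * userspace instead of the raw CS clock.
 */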
/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct i915_perf *perf = stream->perf;
	struct i915_perf_group *g;
	int ret;

	if (!props->engine) {
		drm_dbg(&stream->perf->i915->drm,
			"OA engine not specified\n");
		return -EINVAL;
	}
	g = props->engine->oa_group;

	/*
	 * If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!perf->metrics_kobj) {
		drm_dbg(&stream->perf->i915->drm,
			"OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
		drm_dbg(&stream->perf->i915->drm,
			"Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!perf->ops.enable_metric_set) {
		drm_dbg(&stream->perf->i915->drm,
			"OA unit not supported\n");
		return -ENODEV;
	}

	/*
	 * To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (g->exclusive_stream) {
		drm_dbg(&stream->perf->i915->drm,
			"OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		drm_dbg(&stream->perf->i915->drm,
			"OA report format not specified\n");
		return -EINVAL;
	}

	stream->engine = props->engine;
	stream->uncore = stream->engine->gt->uncore;

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
		return -EINVAL;

	stream->sample_flags = props->sample_flags;
	stream->sample_size += stream->oa_buffer.format->size;

	stream->hold_preemption = props->hold_preemption;

	stream->periodic = props->oa_periodic;
	if (stream->periodic)
		stream->period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret) {
			drm_dbg(&stream->perf->i915->drm,
				"Invalid context id to filter with\n");
			return ret;
		}
	}

	ret = alloc_noa_wait(stream);
	if (ret) {
		drm_dbg(&stream->perf->i915->drm,
			"Unable to allocate NOA wait batch buffer\n");
		goto err_noa_wait_alloc;
	}

	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
	if (!stream->oa_config) {
		drm_dbg(&stream->perf->i915->drm,
			"Invalid OA config id=%i\n", props->metrics_set);
		ret = -EINVAL;
		goto err_config;
	}

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 *   In our case we are expecting that taking pm + FORCEWAKE
	 *   references will effectively disable RC6.
	 */
	intel_engine_pm_get(stream->engine);
	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(stream);
	if (ret)
		goto err_oa_buf_alloc;

	stream->ops = &i915_oa_stream_ops;

	stream->engine->gt->perf.sseu = props->sseu;
	WRITE_ONCE(g->exclusive_stream, stream);

	ret = i915_perf_stream_enable_sync(stream);
	if (ret) {
		drm_dbg(&stream->perf->i915->drm,
			"Unable to enable metric set\n");
		goto err_enable;
	}

	drm_dbg(&stream->perf->i915->drm,
		"opening stream oa config uuid=%s\n",
		stream->oa_config->uuid);

	hrtimer_init(&stream->poll_check_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stream->poll_check_timer.function = oa_poll_check_timer_cb;
	init_waitqueue_head(&stream->poll_wq);
	spin_lock_init(&stream->oa_buffer.ptr_lock);
	mutex_init(&stream->lock);

	return 0;

err_enable:
	WRITE_ONCE(g->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

err_oa_buf_alloc:
	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	free_oa_configs(stream);

err_config:
	free_noa_wait(stream);

err_noa_wait_alloc:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}
void i915_oa_init_reg_state(const struct intel_context *ce,
			    const struct intel_engine_cs *engine)
{
	struct i915_perf_stream *stream;

	if (engine->class != RENDER_CLASS)
		return;

	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
	stream = READ_ONCE(engine->oa_group->exclusive_stream);
	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
		gen8_update_reg_state_unlocked(ce, stream);
}
/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to the i915_perf_read_locked() and
 * &i915_perf_stream_ops->read but to save having stream implementations (of
 * which we might have multiple later) we handle blocking read here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	size_t offset = 0;
	int ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&stream->lock);
			ret = stream->ops->read(stream, buf, count, &offset);
			mutex_unlock(&stream->lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&stream->lock);
		ret = stream->ops->read(stream, buf, count, &offset);
		mutex_unlock(&stream->lock);
	}

	/* We allow the poll checking to sometimes report false positive EPOLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a EPOLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another EPOLLIN event.
	 * The exception to this is if ops->read() returned -ENOSPC which means
	 * that more OA data is available than could fit in the user provided
	 * buffer. In this case we want the next poll() call to not block.
	 */
	if (ret != -ENOSPC)
		stream->pollin = false;

	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
	return offset ?: (ret ?: -EAGAIN);
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct i915_perf_stream *stream =
		container_of(hrtimer, typeof(*stream), poll_check_timer);

	if (oa_buffer_check_unlocked(stream)) {
		stream->pollin = true;
		wake_up(&stream->poll_wq);
	}

	hrtimer_forward_now(hrtimer,
			    ns_to_ktime(stream->poll_oa_period));

	return HRTIMER_RESTART;
}
/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
				      struct file *file,
				      poll_table *wait)
{
	__poll_t events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/* Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (stream->pollin)
		events |= EPOLLIN;

	return events;
}
/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	__poll_t ret;

	mutex_lock(&stream->lock);
	ret = i915_perf_poll_locked(stream, file, wait);
	mutex_unlock(&stream->lock);

	return ret;
}
/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		return;

	/* Allow stream->ops->enable() to refer to this */
	stream->enabled = true;

	if (stream->ops->enable)
		stream->ops->enable(stream);

	if (stream->hold_preemption)
		intel_context_set_nopreempt(stream->pinned_ctx);
}
/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
	if (!stream->enabled)
		return;

	/* Allow stream->ops->disable() to refer to this */
	stream->enabled = false;

	if (stream->hold_preemption)
		intel_context_clear_nopreempt(stream->pinned_ctx);

	if (stream->ops->disable)
		stream->ops->disable(stream);
}
static long i915_perf_config_locked(struct i915_perf_stream *stream,
				    unsigned long metrics_set)
{
	struct i915_oa_config *config;
	long ret = stream->oa_config->id;

	config = i915_perf_get_oa_config(stream->perf, metrics_set);
	if (!config)
		return -EINVAL;

	if (config != stream->oa_config) {
		int err;

		/*
		 * If OA is bound to a specific context, emit the
		 * reconfiguration inline from that context. The update
		 * will then be ordered with respect to submission on that
		 * context.
		 *
		 * When set globally, we use a low priority kernel context,
		 * so it will effectively take effect when idle.
		 */
		err = emit_oa_config(stream, config, oa_context(stream), NULL);
		if (!err)
			config = xchg(&stream->oa_config, config);
		else
			ret = err;
	}

	i915_oa_config_put(config);

	return ret;
}
/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
				   unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case I915_PERF_IOCTL_ENABLE:
		i915_perf_enable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_DISABLE:
		i915_perf_disable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_CONFIG:
		return i915_perf_config_locked(stream, arg);
	}

	return -EINVAL;
}
/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	struct i915_perf_stream *stream = file->private_data;
	long ret;

	mutex_lock(&stream->lock);
	ret = i915_perf_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&stream->lock);

	return ret;
}
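
/*
 * Hedged userspace sketch (not kernel code) of driving the stream ioctls
 * handled above; "fd" is assumed to be an open i915 perf stream fd and
 * "config_id" a metric set ID previously registered via sysfs:
 *
 *	ioctl(fd, I915_PERF_IOCTL_ENABLE, 0);		// start capture
 *	ioctl(fd, I915_PERF_IOCTL_CONFIG, config_id);	// switch metric set
 *	ioctl(fd, I915_PERF_IOCTL_DISABLE, 0);		// stop capture
 *
 * None of these ioctls take pointer arguments, which is why the same
 * handler can safely serve as .compat_ioctl for 32-bit userspace in the
 * file_operations below.
 */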
/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The gt->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}
/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	struct intel_gt *gt = stream->engine->gt;

	/*
	 * Within this call, we know that the fd is being closed and we have no
	 * other user of stream->lock. Use the perf lock to destroy the stream
	 * here.
	 */
	mutex_lock(&gt->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&gt->perf.lock);

	/* Release the reference the perf stream kept on the driver. */
	drm_dev_put(&perf->i915->drm);

	return 0;
}
static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
	/* Our ioctl have no arguments, so it's safe to use the same function
	 * to handle 32bits compatibility.
	 */
	.compat_ioctl   = i915_perf_ioctl,
};
/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @perf: i915 perf instance
 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN'
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct i915_perf *perf,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (IS_ERR(specific_ctx)) {
			drm_dbg(&perf->i915->drm,
				"Failed to look up context with ID %u for opening perf stream\n",
				ctx_handle);
			ret = PTR_ERR(specific_ctx);
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8->11 the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 *
	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
	 * per context basis. So we can relax requirements there if the user
	 * doesn't request global stream access (i.e. query based sampling
	 * using MI_REPORT_PERF_COUNT).
	 */
	if (IS_HASWELL(perf->i915) && specific_ctx)
		privileged_op = false;
	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
		privileged_op = false;

	if (props->hold_preemption) {
		if (!props->single_context) {
			drm_dbg(&perf->i915->drm,
				"preemption disable with no context\n");
			ret = -EINVAL;
			goto err;
		}
		privileged_op = true;
	}

	/*
	 * Asking for SSEU configuration is a privileged operation.
	 */
	if (props->has_sseu)
		privileged_op = true;
	else
		get_default_sseu_config(&props->sseu, props->engine);

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to open i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->perf = perf;
	stream->ctx = specific_ctx;
	stream->poll_oa_period = props->poll_oa_period;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_flags;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	/* Take a reference on the driver that will be kept with stream_fd
	 * until its release.
	 */
	drm_dev_get(&perf->i915->drm);

	return stream_fd;

err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);

	return div_u64(nom + den - 1, den);
}
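/*
 * Worked example for the conversion above (the timestamp frequency is device
 * specific; 12.5MHz is only an assumed illustration): the OA unit samples
 * every (2ULL << exponent) timestamp ticks, so exponent == 0 at 12500000Hz
 * gives (2 * NSEC_PER_SEC + 12500000 - 1) / 12500000 = 160ns, matching the
 * 160ns HSW minimum period mentioned in read_properties_unlocked() below.
 */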
static __always_inline bool
oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	return test_bit(format, perf->format_mask);
}

static __always_inline void
oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	__set_bit(format, perf->format_mask);
}
/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @perf: i915 perf instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 */
static int read_properties_unlocked(struct i915_perf *perf,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	const struct i915_oa_format *f;
	u64 __user *uprop = uprops;
	bool config_instance = false;
	bool config_class = false;
	bool config_sseu = false;
	u8 class, instance;
	u32 i;
	int ret;

	memset(props, 0, sizeof(struct perf_open_properties));
	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (!n_props || n_props >= DRM_I915_PERF_PROP_MAX) {
		drm_dbg(&perf->i915->drm,
			"Invalid number of i915 perf properties given\n");
		return -EINVAL;
	}

	/* Defaults when class:instance is not passed */
	class = I915_ENGINE_CLASS_RENDER;
	instance = 0;
	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			drm_dbg(&perf->i915->drm,
				"Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			if (value)
				props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				drm_dbg(&perf->i915->drm,
					"Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				drm_dbg(&perf->i915->drm,
					"Out-of-range OA report format %llu\n",
					value);
				return -EINVAL;
			}
			if (!oa_format_valid(perf, value)) {
				drm_dbg(&perf->i915->drm,
					"Unsupported OA report format %llu\n",
					value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				drm_dbg(&perf->i915->drm,
					"OA timer exponent too high (> %u)\n",
					OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(perf, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;

				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else {
				oa_freq_hz = 0;
			}

			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
				drm_dbg(&perf->i915->drm,
					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
					i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
			props->hold_preemption = !!value;
			break;
		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
				drm_dbg(&perf->i915->drm,
					"SSEU config not supported on gfx %x\n",
					GRAPHICS_VER_FULL(perf->i915));
				return -ENODEV;
			}

			if (copy_from_user(&user_sseu,
					   u64_to_user_ptr(value),
					   sizeof(user_sseu))) {
				drm_dbg(&perf->i915->drm,
					"Unable to copy global sseu parameter\n");
				return -EFAULT;
			}
			config_sseu = true;
			break;
		}
		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
			if (value < 100000 /* 100us */) {
				drm_dbg(&perf->i915->drm,
					"OA availability timer too small (%lluns < 100us)\n",
					value);
				return -EINVAL;
			}
			props->poll_oa_period = value;
			break;
		case DRM_I915_PERF_PROP_OA_ENGINE_CLASS:
			class = (u8)value;
			config_class = true;
			break;
		case DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE:
			instance = (u8)value;
			config_instance = true;
			break;
		default:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}
	if ((config_class && !config_instance) ||
	    (config_instance && !config_class)) {
		drm_dbg(&perf->i915->drm,
			"OA engine-class and engine-instance parameters must be passed together\n");
		return -EINVAL;
	}

	props->engine = intel_engine_lookup_user(perf->i915, class, instance);
	if (!props->engine) {
		drm_dbg(&perf->i915->drm,
			"OA engine class and instance invalid %d:%d\n",
			class, instance);
		return -EINVAL;
	}

	if (!engine_supports_oa(props->engine)) {
		drm_dbg(&perf->i915->drm,
			"Engine not supported by OA %d:%d\n",
			class, instance);
		return -EINVAL;
	}

	/*
	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded with Media
	 * C6 disable in BIOS. Fail if Media C6 is enabled on steppings where OAM
	 * does not work as expected.
	 */
	if (IS_MEDIA_GT_IP_STEP(props->engine->gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
	    props->engine->oa_group->type == TYPE_OAM &&
	    intel_check_bios_c6_setup(&props->engine->gt->rc6)) {
		drm_dbg(&perf->i915->drm,
			"OAM requires media C6 to be disabled in BIOS\n");
		return -EINVAL;
	}

	i = array_index_nospec(props->oa_format, I915_OA_FORMAT_MAX);
	f = &perf->oa_formats[i];
	if (!engine_supports_oa_format(props->engine, f->type)) {
		drm_dbg(&perf->i915->drm,
			"Invalid OA format %d for class %d\n",
			f->type, props->engine->class);
		return -EINVAL;
	}

	if (config_sseu) {
		ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
		if (ret) {
			drm_dbg(&perf->i915->drm,
				"Invalid SSEU configuration\n");
			return ret;
		}
		props->has_sseu = true;
	}

	return 0;
}
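/*
 * For reference, a userspace sketch (illustrative only) of the flat
 * (key, value) u64 pair layout parsed above, here configuring periodic OA
 * sampling; metrics_set_id and oa_exponent stand in for values the client
 * would choose itself:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, oa_exponent,
 *	};
 */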
/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_lock.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_open_param *param = data;
	struct intel_gt *gt;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!perf->i915)
		return -ENOTSUPP;

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		drm_dbg(&perf->i915->drm,
			"Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(perf,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	gt = props.engine->gt;

	mutex_lock(&gt->perf.lock);
	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
	mutex_unlock(&gt->perf.lock);

	return ret;
}
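/*
 * Continuing the userspace sketch from read_properties_unlocked() above
 * (illustrative only), the properties array is handed to this ioctl as:
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */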
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @i915: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;
	struct intel_gt *gt = to_gt(i915);

	if (!perf->i915)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we register after
	 * being exposed to userspace.
	 */
	mutex_lock(&gt->perf.lock);

	perf->metrics_kobj =
		kobject_create_and_add("metrics",
				       &i915->drm.primary->kdev->kobj);

	mutex_unlock(&gt->perf.lock);
}
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @i915: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->metrics_kobj)
		return;

	kobject_put(perf->metrics_kobj);
	perf->metrics_kobj = NULL;
}
static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool reg_in_range_table(u32 addr, const struct i915_range *table)
{
	while (table->start || table->end) {
		if (addr >= table->start && addr <= table->end)
			return true;

		table++;
	}

	return false;
}
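/*
 * Note: reg_in_range_table() above stops at the first entry with both
 * .start and .end zero, so every address table below is terminated by an
 * empty {} sentinel.
 */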
#define REG_EQUAL(addr, mmio) \
	((addr) == i915_mmio_reg_offset(mmio))

static const struct i915_range gen7_oa_b_counters[] = {
	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
	{}
};

static const struct i915_range gen12_oa_b_counters[] = {
	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
	{}
};

static const struct i915_range mtl_oam_b_counters[] = {
	{ .start = 0x393000, .end = 0x39301c },	/* GEN12_OAM_STARTTRIG1[1-8] */
	{ .start = 0x393020, .end = 0x39303c },	/* GEN12_OAM_REPORTTRIG1[1-8] */
	{ .start = 0x393040, .end = 0x39307c },	/* GEN12_OAM_CEC[0-7][0-1] */
	{ .start = 0x393200, .end = 0x39323c },	/* MPES[0-7] */
	{}
};

static const struct i915_range xehp_oa_b_counters[] = {
	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
	{}
};

static const struct i915_range gen7_oa_mux_regs[] = {
	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
	{}
};

static const struct i915_range hsw_oa_mux_regs[] = {
	{ .start = 0x09e80, .end = 0x09ea4 },	/* HSW_MBVID2_NOA[0-9] */
	{ .start = 0x09ec0, .end = 0x09ec0 },	/* HSW_MBVID2_MISR0 */
	{ .start = 0x25100, .end = 0x2ff90 },
	{}
};

static const struct i915_range chv_oa_mux_regs[] = {
	{ .start = 0x182300, .end = 0x1823a4 },
	{}
};

static const struct i915_range gen8_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};

static const struct i915_range gen11_oa_mux_regs[] = {
	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
	{}
};

static const struct i915_range gen12_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};

/*
 * 0x20cc is repurposed on MTL, so use a separate array for MTL.
 */
static const struct i915_range mtl_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{ .start = 0x38d100, .end = 0x38d114 },	/* VISACTL */
	{}
};
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_b_counters);
}

static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs);
}

static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs) ||
	       reg_in_range_table(addr, gen11_oa_mux_regs);
}

static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, hsw_oa_mux_regs);
}

static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, chv_oa_mux_regs);
}

static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen12_oa_b_counters);
}

static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	if (HAS_OAM(perf->i915) &&
	    GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
		return reg_in_range_table(addr, mtl_oam_b_counters);

	return false;
}

static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, xehp_oa_b_counters) ||
	       reg_in_range_table(addr, gen12_oa_b_counters) ||
	       mtl_is_valid_oam_b_counter_addr(perf, addr);
}

static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
		return reg_in_range_table(addr, mtl_oa_mux_regs);
	else
		return reg_in_range_table(addr, gen12_oa_mux_regs);
}
static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}
static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(perf, addr)) {
			drm_dbg(&perf->i915->drm,
				"Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}
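/*
 * Unlike the u64 stream open properties, @regs above is a flat userspace
 * array of (addr, value) u32 pairs. A single-entry sketch (illustrative;
 * reg_value is a placeholder for a config-specific value, and 0x9888 is
 * NOA_WRITE, which falls inside the gen7_oa_mux_regs range above):
 *
 *	uint32_t mux_regs[] = { 0x9888, reg_value };
 */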
static ssize_t show_dynamic_id(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(perf->metrics_kobj,
				  &oa_config->sysfs_metric);
}
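/*
 * The sysfs group created here is what lets userspace enumerate dynamic
 * configs; since the metrics kobject hangs off the primary DRM device, the
 * id typically ends up readable at a path of the form (card index may
 * differ):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 */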
/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	struct i915_oa_reg *regs;
	int err, id;

	if (!perf->i915)
		return -ENOTSUPP;

	if (!perf->metrics_kobj) {
		drm_dbg(&perf->i915->drm,
			"OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		drm_dbg(&perf->i915->drm,
			"No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	if (!uuid_is_valid(args->uuid)) {
		drm_dbg(&perf->i915->drm,
			"Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config is
	 * kzalloc'ed.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_mux_reg,
			     u64_to_user_ptr(args->mux_regs_ptr),
			     args->n_mux_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for mux_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->mux_regs = regs;

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_b_counter_reg,
			     u64_to_user_ptr(args->boolean_regs_ptr),
			     args->n_boolean_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->b_counter_regs = regs;

	if (GRAPHICS_VER(perf->i915) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		regs = alloc_oa_regs(perf,
				     perf->ops.is_valid_flex_reg,
				     u64_to_user_ptr(args->flex_regs_ptr),
				     args->n_flex_regs);

		if (IS_ERR(regs)) {
			drm_dbg(&perf->i915->drm,
				"Failed to create OA config for flex_regs\n");
			err = PTR_ERR(regs);
			goto reg_err;
		}
		oa_config->flex_regs = regs;
	}

	err = mutex_lock_interruptible(&perf->metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * very costly.
	 */
	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			drm_dbg(&perf->i915->drm,
				"OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
	if (err) {
		drm_dbg(&perf->i915->drm,
			"Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 for kernel stored test config. */
	oa_config->id = idr_alloc(&perf->metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate id for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}
	id = oa_config->id;

	drm_dbg(&perf->i915->drm,
		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
	mutex_unlock(&perf->metrics_lock);

	return id;

sysfs_err:
	mutex_unlock(&perf->metrics_lock);
reg_err:
	i915_oa_config_put(oa_config);
	drm_dbg(&perf->i915->drm,
		"Failed to add new OA config\n");
	return err;
}
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!perf->i915)
		return -ENOTSUPP;

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&perf->metrics_lock);
	if (ret)
		return ret;

	oa_config = idr_find(&perf->metrics_idr, *arg);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto err_unlock;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);

	idr_remove(&perf->metrics_idr, *arg);

	mutex_unlock(&perf->metrics_lock);

	drm_dbg(&perf->i915->drm,
		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	i915_oa_config_put(oa_config);

	return 0;

err_unlock:
	mutex_unlock(&perf->metrics_lock);
	return ret;
}
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = SYSCTL_ONE,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
};
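/*
 * These entries surface as the dev.i915.perf_stream_paranoid and
 * dev.i915.oa_max_sample_rate sysctls, e.g. (shell sketch, requires root):
 *
 *	sysctl dev.i915.perf_stream_paranoid=0
 */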
static u32 num_perf_groups_per_gt(struct intel_gt *gt)
{
	return 1;
}

static u32 __oam_engine_group(struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70)) {
		/*
		 * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
		 * within the gt use the same OAM. All MTL SKUs list 1 SA MEDIA.
		 */
		drm_WARN_ON(&engine->i915->drm,
			    engine->gt->type != GT_MEDIA);

		return PERF_GROUP_OAM_SAMEDIA_0;
	}

	return PERF_GROUP_INVALID;
}

static u32 __oa_engine_group(struct intel_engine_cs *engine)
{
	switch (engine->class) {
	case RENDER_CLASS:
		return PERF_GROUP_OAG;

	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
		return __oam_engine_group(engine);

	default:
		return PERF_GROUP_INVALID;
	}
}
static struct i915_perf_regs __oam_regs(u32 base)
{
	return (struct i915_perf_regs) {
		base,
		GEN12_OAM_HEAD_POINTER(base),
		GEN12_OAM_TAIL_POINTER(base),
		GEN12_OAM_BUFFER(base),
		GEN12_OAM_CONTEXT_CONTROL(base),
		GEN12_OAM_CONTROL(base),
		GEN12_OAM_DEBUG(base),
		GEN12_OAM_STATUS(base),
		GEN12_OAM_CONTROL_COUNTER_FORMAT_SHIFT,
	};
}

static struct i915_perf_regs __oag_regs(void)
{
	return (struct i915_perf_regs) {
		0,
		GEN12_OAG_OAHEADPTR,
		GEN12_OAG_OATAILPTR,
		GEN12_OAG_OABUFFER,
		GEN12_OAG_OAGLBCTXCTRL,
		GEN12_OAG_OACONTROL,
		GEN12_OAG_OA_DEBUG,
		GEN12_OAG_OASTATUS,
		GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT,
	};
}
static void oa_init_groups(struct intel_gt *gt)
{
	int i, num_groups = gt->perf.num_perf_groups;

	for (i = 0; i < num_groups; i++) {
		struct i915_perf_group *g = &gt->perf.group[i];

		/* Fused off engines can result in a group with num_engines == 0 */
		if (g->num_engines == 0)
			continue;

		if (i == PERF_GROUP_OAG && gt->type != GT_MEDIA) {
			g->regs = __oag_regs();
			g->type = TYPE_OAG;
		} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
			g->regs = __oam_regs(mtl_oa_base[i]);
			g->type = TYPE_OAM;
		}
	}
}
static int oa_init_gt(struct intel_gt *gt)
{
	u32 num_groups = num_perf_groups_per_gt(gt);
	struct intel_engine_cs *engine;
	struct i915_perf_group *g;
	intel_engine_mask_t tmp;

	g = kcalloc(num_groups, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	for_each_engine_masked(engine, gt, ALL_ENGINES, tmp) {
		u32 index = __oa_engine_group(engine);

		engine->oa_group = NULL;
		if (index < num_groups) {
			g[index].num_engines++;
			engine->oa_group = &g[index];
		}
	}

	gt->perf.num_perf_groups = num_groups;
	gt->perf.group = g;

	oa_init_groups(gt);

	return 0;
}

static int oa_init_engine_groups(struct i915_perf *perf)
{
	struct intel_gt *gt;
	int i, ret;

	for_each_gt(gt, perf->i915, i) {
		ret = oa_init_gt(gt);
		if (ret)
			return ret;
	}

	return 0;
}
static void oa_init_supported_formats(struct i915_perf *perf)
{
	struct drm_i915_private *i915 = perf->i915;
	enum intel_platform platform = INTEL_INFO(i915)->platform;

	switch (platform) {
	case INTEL_HASWELL:
		oa_format_add(perf, I915_OA_FORMAT_A13);
		oa_format_add(perf, I915_OA_FORMAT_A29);
		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_BROADWELL:
	case INTEL_CHERRYVIEW:
	case INTEL_SKYLAKE:
	case INTEL_BROXTON:
	case INTEL_KABYLAKE:
	case INTEL_GEMINILAKE:
	case INTEL_COFFEELAKE:
	case INTEL_COMETLAKE:
	case INTEL_ICELAKE:
	case INTEL_ELKHARTLAKE:
	case INTEL_JASPERLAKE:
	case INTEL_TIGERLAKE:
	case INTEL_ROCKETLAKE:
	case INTEL_DG1:
	case INTEL_ALDERLAKE_S:
	case INTEL_ALDERLAKE_P:
		oa_format_add(perf, I915_OA_FORMAT_A12);
		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_DG2:
		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
		break;

	case INTEL_METEORLAKE:
		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u64_B8_C8);
		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u32_B8_C8);
		break;

	default:
		MISSING_CASE(platform);
	}
}
static void i915_perf_init_info(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	switch (GRAPHICS_VER(i915)) {
	case 8:
		perf->ctx_oactxctrl_offset = 0x120;
		perf->ctx_flexeu0_offset = 0x2ce;
		perf->gen8_valid_ctx_bit = BIT(25);
		break;
	case 9:
		perf->ctx_oactxctrl_offset = 0x128;
		perf->ctx_flexeu0_offset = 0x3de;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 11:
		perf->ctx_oactxctrl_offset = 0x124;
		perf->ctx_flexeu0_offset = 0x78e;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 12:
		perf->gen8_valid_ctx_bit = BIT(16);
		/*
		 * Calculate offset at runtime in oa_pin_context for gen12 and
		 * cache the value in perf->ctx_oactxctrl_offset.
		 */
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
	}
}
/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
int i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	perf->oa_formats = oa_formats;
	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;
		i915_perf_init_info(i915);

		if (IS_GRAPHICS_VER(i915, 8, 9)) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 11) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen11_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen11_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 12) {
			perf->ops.is_valid_b_counter_reg =
				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
				xehp_is_valid_b_counter_addr :
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
		}
	}

	if (perf->ops.enable_metric_set) {
		struct intel_gt *gt;
		int i, ret;

		for_each_gt(gt, i915, i)
			mutex_init(&gt->perf.lock);

		/* Choose a representative limit */
		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;

		mutex_init(&perf->metrics_lock);
		idr_init_base(&perf->metrics_idr, 1);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);

		ratelimit_state_init(&perf->tail_pointer_race,
				     5 * HZ, 10);
		ratelimit_set_flags(&perf->tail_pointer_race,
				    RATELIMIT_MSG_ON_RELEASE);

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;

		ret = oa_init_engine_groups(perf);
		if (ret) {
			drm_err(&i915->drm,
				"OA initialization failed %d\n", ret);
			return ret;
		}

		oa_init_supported_formats(perf);
	}

	return 0;
}
static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

int i915_perf_sysctl_register(void)
{
	sysctl_header = register_sysctl("dev/i915", oa_table);
	return 0;
}

void i915_perf_sysctl_unregister(void)
{
	unregister_sysctl_table(sysctl_header);
}
/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;
	struct intel_gt *gt;
	int i;

	if (!perf->i915)
		return;

	for_each_gt(gt, perf->i915, i)
		kfree(gt->perf.group);

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}
/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 * @i915: The i915 device
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(struct drm_i915_private *i915)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 *
	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
	 *    be run for the duration of the performance recording based on
	 *    their SSEU configuration.
	 *
	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
	 *    interval for the hrtimer used to check for OA data.
	 *
	 * 6: Add DRM_I915_PERF_PROP_OA_ENGINE_CLASS and
	 *    DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE
	 *
	 * 7: Add support for video decode and enhancement classes.
	 */

	/*
	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded with Media
	 * C6 disable in BIOS. If Media C6 is enabled in BIOS, return version 6
	 * to indicate that OA media is not supported.
	 */
	if (IS_MEDIA_GT_IP_STEP(i915->media_gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
	    intel_check_bios_c6_setup(&i915->media_gt->rc6))
		return 6;

	return 7;
}
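/*
 * Userspace can query this version via the I915_PARAM_PERF_REVISION
 * getparam; a minimal sketch:
 *
 *	int perf_revision;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &perf_revision,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */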
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif