/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
struct intel_gt;

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

#define ENGINE_TRACE(e, fmt, ...) do {					\
	const struct intel_engine_cs *e__ __maybe_unused = (e);	\
	GEM_TRACE("%s %s: " fmt,					\
		  dev_name(e__->i915->drm.dev), e__->name,		\
		  ##__VA_ARGS__);					\
} while (0)

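/*
 * For example (an illustrative sketch only; the format string and the
 * "flags" argument are hypothetical):
 *
 *	ENGINE_TRACE(engine, "reset request, flags:%lx\n", flags);
 */
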
/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
	__ENGINE_REG_OP(read64_2x32, (engine__), \
			lower_reg__((engine__)->mmio_base), \
			upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)

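/*
 * An illustrative sketch of how the accessors above are used, assuming
 * register helpers such as RING_HEAD(base)/RING_TAIL(base) defined in the
 * base-parameterised form described above ("head" and "tail" are
 * hypothetical locals):
 *
 *	u32 head = ENGINE_READ(engine, RING_HEAD);
 *
 *	ENGINE_WRITE(engine, RING_TAIL, tail);
 *	ENGINE_POSTING_READ(engine, RING_TAIL);
 */
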
#define GEN6_RING_FAULT_REG_READ(engine__) \
	intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
	intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
	u32 __val; \
\
	__val = intel_uncore_read((engine__)->uncore, \
				  RING_FAULT_REG(engine__)); \
	__val &= ~(clear__); \
	__val |= (set__); \
	intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
			   __val); \
})

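/*
 * For example, acknowledging a latched fault while leaving the other bits
 * untouched might look like (a sketch, assuming RING_FAULT_VALID from
 * i915_reg.h):
 *
 *	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 */
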
/* The seqno size is actually only a uint32, but since we plan to use
 * MI_FLUSH_DW to do the writes, and that must have qword-aligned offsets,
 * simply pretend it's 8b.
 */

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *cur, * const *old, *active;

	cur = READ_ONCE(execlists->active);
	smp_rmb(); /* pairs with overwrite protection in process_csb() */
	do {
		old = cur;

		active = READ_ONCE(*cur);
		cur = READ_ONCE(execlists->active);

		smp_rmb(); /* and complete the seqlock retry */
	} while (unlikely(cur != old));

	return active;
}

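/*
 * A minimal sketch of a caller sampling the request currently being
 * executed, if any; the snapshot is only advisory unless the execlists are
 * locked as below:
 *
 *	struct i915_request *rq;
 *
 *	rq = execlists_active(&engine->execlists);
 *	if (rq)
 *		... inspect the in-flight request ...
 */
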
static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
	local_bh_disable(); /* prevent local softirq and lock recursion */
	tasklet_lock(&execlists->tasklet);
}

static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
	tasklet_unlock(&execlists->tasklet);
	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}

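/*
 * Typical pattern (a sketch): hold the tasklet lock, with bottom halves
 * disabled, while walking the execlists state:
 *
 *	execlists_active_lock_bh(&engine->execlists);
 *	rq = execlists_active(&engine->execlists);
 *	... dump or inspect the submission queue ...
 *	execlists_active_unlock_bh(&engine->execlists);
 */
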
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since we
	 * do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH * sizeof(u32))

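/*
 * For example, the breadcrumb last written by the engine can be sampled with
 * (a sketch using the offsets above):
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 */
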
#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);

void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_resume(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

static inline void
intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
	irq_work_queue(&engine->breadcrumbs.irq_work);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

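/*
 * A minimal sketch of its use when filling a batch buffer, assuming batch
 * points at at least six free dwords (flags from intel_gpu_commands.h):
 *
 *	batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_CS_STALL, 0);
 */
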
static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a: for post-sync ops following a GPGPU operation we need a prior
	 * CS_STALL, which is emitted by the flush following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

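/*
 * Sketch of a typical breadcrumb emission on the render engine, assuming
 * gtt_offset points at the request's slot in the HWSP:
 *
 *	cs = gen8_emit_ggtt_write_rcs(cs, rq->fence.seqno, gtt_offset,
 *				      PIPE_CONTROL_CS_STALL);
 */
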
static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

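/*
 * The non-render engines use the MI_FLUSH_DW variant instead; a comparable
 * sketch:
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, gtt_offset, 0);
 */
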
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
					bool stalled)
{
	if (engine->reset.rewind)
		engine->reset.rewind(engine, stalled);
	engine->serial++; /* contexts lost */
}

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine);

void intel_engines_reset_default_submission(struct intel_gt *gt);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

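/*
 * e.g. dumping the engine state to the kernel log (a sketch; drm_info_printer
 * comes from <drm/drm_print.h>):
 *
 *	struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
 *
 *	intel_engine_dump(engine, &p, "%s\n", engine->name);
 */
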
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct intel_gt *gt, u8 class);

void intel_engine_init_active(struct intel_engine_cs *engine,
			      unsigned int subclass);
#define ENGINE_PHYSICAL	0
#define ENGINE_MOCK	1
#define ENGINE_VIRTUAL	2

static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
		return false;

	return intel_engine_has_preemption(engine);
}

#endif /* _INTEL_RINGBUFFER_H_ */