/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->i915 = i915;

        spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = i915_coherent_map_type(i915);
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}
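/*
 * Illustrative sketch, not part of the original file: igt_spinner_init()
 * backs the spinner with two internal pages, a software "HWS" page used
 * for seqno reporting (spin->seqno) and a page holding the looping batch
 * (spin->batch). A minimal caller, assuming an existing
 * struct drm_i915_private *i915, pairs it with igt_spinner_fini():
 *
 *	struct igt_spinner spin;
 *	int err;
 *
 *	err = igt_spinner_init(&spin, i915);
 *	if (err)
 *		return err;
 *	... run spinning requests against the GPU ...
 *	igt_spinner_fini(&spin);
 */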
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}
static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}
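/*
 * Illustrative note, not part of the original file: each fence context
 * owns one u32 slot in the HWS page, so seqno_offset() is simply
 * offset_in_page(context * sizeof(u32)). E.g. fence context 3 reports
 * at byte offset 12, and hws_address() returns hws->node.start + 12.
 */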
static int emit_recurse_batch(struct igt_spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
{
        struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
                return PTR_ERR(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        err = i915_vma_move_to_active(vma, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }

        err = i915_vma_move_to_active(hws, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
        }

        batch = spin->batch;

        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

        *batch++ = arbitration_command;

        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        i915_gem_chipset_flush(spin->i915);

        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err;
}
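/*
 * Illustrative note, not part of the original file: the batch emitted
 * above is a tiny looping program:
 *
 *	MI_STORE_DWORD_IMM_GEN4  write rq->fence.seqno into this context's
 *	                         HWS slot, advertising that we are running
 *	arbitration_command      e.g. MI_ARB_CHECK or MI_NOOP, chosen by
 *	                         the caller to allow or deny preemption
 *	MI_BATCH_BUFFER_START    jump back to vma->node.start, i.e. loop
 *	MI_BATCH_BUFFER_END      never executed while spinning
 *
 * The spinner keeps looping until igt_spinner_end() overwrites the first
 * dword with MI_BATCH_BUFFER_END, terminating the batch on its next pass.
 */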
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
                           struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine,
                           u32 arbitration_command)
{
        struct i915_request *rq;
        int err;

        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq))
                return rq;

        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
                i915_request_add(rq);
                return ERR_PTR(err);
        }

        return rq;
}
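/*
 * Illustrative usage, not part of the original file; ctx and engine come
 * from the surrounding test. On success the request is returned without
 * being added, so the caller emits it and then waits for the spinner:
 *
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		... flag the failure ...
 */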
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}
void igt_spinner_end(struct igt_spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(spin->i915);
}
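/*
 * Note, not part of the original file: overwriting the first batch dword
 * with MI_BATCH_BUFFER_END ends the loop on its next iteration, and the
 * chipset flush pushes the CPU write out so the GPU observes it.
 */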
void igt_spinner_fini(struct igt_spinner *spin)
{
        igt_spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
        if (!wait_event_timeout(rq->execute,
                                READ_ONCE(rq->global_seqno),
                                msecs_to_jiffies(10)))
                return false;

        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}
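/*
 * Illustrative note, not part of the original file: the wait above is
 * two-stage. First wait up to 10ms for the request to be submitted at
 * all (rq->execute signalled with a global_seqno), then poll this
 * context's HWS slot for the spinner's seqno write: a 10us busy-wait
 * followed by a sleeping wait of up to 1s before declaring failure.
 */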