/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        int i;

        for (i = 0; i < 8; i++)
                ring_context->pdps[i].val = pdp[7 - i];
}
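
/*
 * Point the PDP root pointers in the shadow ring context at the shadow
 * page tables of the workload's shadow mm, so the HW walks the shadow
 * PPGTT rather than the guest's own page tables.
 */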
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
        struct execlist_ring_context *shadow_ring_context;
        struct intel_context *ctx = workload->req->context;

        if (WARN_ON(!workload->shadow_mm))
                return;

        if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
                return;

        shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
        set_context_pdp_root_pointer(shadow_ring_context,
                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
}

/*
 * When populating the shadow context from the guest, we should not override
 * OA-related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * host and guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
                       u32 *reg_state, bool save)
{
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
        u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
        u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
        int i = 0;
        u32 flex_mmio[] = {
                i915_mmio_reg_offset(EU_PERF_CNTL0),
                i915_mmio_reg_offset(EU_PERF_CNTL1),
                i915_mmio_reg_offset(EU_PERF_CNTL2),
                i915_mmio_reg_offset(EU_PERF_CNTL3),
                i915_mmio_reg_offset(EU_PERF_CNTL4),
                i915_mmio_reg_offset(EU_PERF_CNTL5),
                i915_mmio_reg_offset(EU_PERF_CNTL6),
        };

        if (workload->engine->id != RCS0)
                return;

        if (save) {
                workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

                for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
                        u32 state_offset = ctx_flexeu0 + i * 2;

                        workload->flex_mmio[i] = reg_state[state_offset + 1];
                }
        } else {
                reg_state[ctx_oactxctrl] =
                        i915_mmio_reg_offset(GEN8_OACTXCONTROL);
                reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

                for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
                        u32 state_offset = ctx_flexeu0 + i * 2;
                        u32 mmio = flex_mmio[i];

                        reg_state[state_offset] = mmio;
                        reg_state[state_offset + 1] = workload->flex_mmio[i];
                }
        }
}
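
/*
 * Populate the shadow context image from the guest: copy selected ring
 * context registers and the remaining context pages from guest memory into
 * the pinned shadow context. If the guest resubmits the same context (same
 * LRCA and ring context GPA as the last submission on this ring), the page
 * copy is skipped.
 */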
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_context *ctx = workload->req->context;
        struct execlist_ring_context *shadow_ring_context;
        void *dst;
        void *context_base;
        unsigned long context_gpa, context_page_num;
        unsigned long gpa_base; /* first gpa of consecutive GPAs */
        unsigned long gpa_size; /* size of consecutive GPAs */
        struct intel_vgpu_submission *s = &vgpu->submission;
        int i;
        bool skip = false;
        int ring_id = workload->engine->id;

        GEM_BUG_ON(!intel_context_is_pinned(ctx));

        context_base = (void *) ctx->lrc_reg_state -
                        (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

        shadow_ring_context = (void *) ctx->lrc_reg_state;

        sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
                intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                                              + RING_CTX_OFF(name.val),\
                                              &shadow_ring_context->name.val, 4);\
                shadow_ring_context->name.val |= 0xffff << 16;\
        }

        COPY_REG_MASKED(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (workload->engine->id == RCS0) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG
#undef COPY_REG_MASKED

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);

        gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
                        workload->engine->name, workload->ctx_desc.lrca,
                        workload->ctx_desc.context_id,
                        workload->ring_context_gpa);

        /* only need to ensure this context is not pinned/unpinned during the
         * period from last submission to this submission.
         * Upon reaching this function, the currently submitted context is not
         * supposed to get unpinned. If a misbehaving guest driver ever does
         * this, it would corrupt itself.
         */
        if (s->last_ctx[ring_id].valid &&
                        (s->last_ctx[ring_id].lrca ==
                                workload->ctx_desc.lrca) &&
                        (s->last_ctx[ring_id].ring_context_gpa ==
                                workload->ring_context_gpa))
                skip = true;

        s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
        s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;

        if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
                return 0;

        s->last_ctx[ring_id].valid = false;
        context_page_num = workload->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
                context_page_num = 19;

        /* find consecutive GPAs from gma until the first inconsecutive GPA.
         * read from the consecutive GPAs into dst virtual address
         */
        gpa_size = 0;
        for (i = 2; i < context_page_num; i++) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EFAULT;
                }

                if (gpa_size == 0) {
                        gpa_base = context_gpa;
                        dst = context_base + (i << I915_GTT_PAGE_SHIFT);
                } else if (context_gpa != gpa_base + gpa_size)
                        goto read;

                gpa_size += I915_GTT_PAGE_SIZE;

                if (i == context_page_num - 1)
                        goto read;

                continue;

read:
                intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
                gpa_base = context_gpa;
                gpa_size = I915_GTT_PAGE_SIZE;
                dst = context_base + (i << I915_GTT_PAGE_SHIFT);
        }
        s->last_ctx[ring_id].valid = true;
        return 0;
}

static inline bool is_gvt_request(struct i915_request *rq)
{
        return intel_context_force_single_submission(rq->context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu,
                               const struct intel_engine_cs *engine)
{
        struct intel_uncore *uncore = engine->uncore;
        i915_reg_t reg;

        reg = RING_INSTDONE(engine->mmio_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                intel_uncore_read(uncore, reg);

        reg = RING_ACTHD(engine->mmio_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                intel_uncore_read(uncore, reg);

        reg = RING_ACTHD_UDW(engine->mmio_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                intel_uncore_read(uncore, reg);
}
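
/*
 * Context status notifier, called by i915 on context schedule-in and
 * schedule-out events: switches engine MMIO state between host and the
 * owning vGPU, and saves ring HW state on schedule-out or preemption.
 */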
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct i915_request *rq = data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[rq->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = rq->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(rq)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, rq->engine);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, rq->engine);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                save_ring_hw_state(workload->vgpu, rq->engine);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, rq->engine);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

static void
shadow_context_descriptor_update(struct intel_context *ce,
                                 struct intel_vgpu_workload *workload)
{
        u64 desc = ce->lrc.desc;

        /*
         * Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        desc |= (u64)workload->ctx_desc.addressing_mode <<
                GEN8_CTX_ADDRESSING_MODE_SHIFT;

        ce->lrc.desc = desc;
}
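
/*
 * Copy the scanned guest ring buffer contents into ring space of the
 * shadow request; shadow_ring_buffer_va is then repointed at the real
 * ring buffer va returned by intel_ring_begin().
 */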
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_request *req = workload->req;
        void *shadow_ring_buffer_va;
        u32 *cs;
        int err;

        if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
                intel_vgpu_restore_inhibit_context(vgpu, req);

        /*
         * To track whether a request has started on HW, we can emit a
         * breadcrumb at the beginning of the request and check its
         * timeline's HWSP to see if the breadcrumb has advanced past the
         * start of this request. The request must have an init breadcrumb
         * if its timeline has has_init_breadcrumb set, or the scheduler
         * might see a wrong state for it during reset. Since requests from
         * GVT always set the has_init_breadcrumb flag, emit the init
         * breadcrumb for all of them here.
         */
        if (req->engine->emit_init_breadcrumb) {
                err = req->engine->emit_init_breadcrumb(req);
                if (err) {
                        gvt_vgpu_err("fail to emit init breadcrumb\n");
                        return err;
                }
        }

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                        workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
                        workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);

        wa_ctx->indirect_ctx.obj = NULL;
        wa_ctx->indirect_ctx.shadow_va = NULL;
}

static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                                          struct intel_context *ce)
{
        struct intel_vgpu_mm *mm = workload->shadow_mm;
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
        int i = 0;

        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
        } else {
                for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
                        struct i915_page_directory * const pd =
                                i915_pd_entry(ppgtt->pd, i);
                        /* skip now as the current i915 ppgtt alloc won't
                         * allocate a top-level pdp for non 4-level tables;
                         * this won't impact the shadow ppgtt.
                         */
                        if (!pd)
                                break;

                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                }
        }
}

static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_request *rq;

        if (workload->req)
                return 0;

        rq = i915_request_create(s->shadow[workload->engine->id]);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                return PTR_ERR(rq);
        }

        workload->req = i915_request_get(rq);
        return 0;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        int ret;

        lockdep_assert_held(&vgpu->vgpu_lock);

        if (workload->shadow)
                return 0;

        if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(s->shadow[workload->engine->id],
                                                 workload);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                return ret;

        if (workload->engine->id == RCS0 &&
            workload->wa_ctx.indirect_ctx.size) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_shadow;
        }

        workload->shadow = true;
        return 0;

err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
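
/*
 * Pin the shadow batch buffers into the GGTT and patch each
 * MI_BATCH_BUFFER_START to point at them; batch buffers that live in the
 * PPGTT keep their original guest address and are shadowed for debugging
 * purposes only.
 */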
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_gvt *gvt = workload->vgpu->gvt;
        const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
        struct intel_vgpu_shadow_bb *bb;
        int ret;

        list_for_each_entry(bb, &workload->shadow_bb, list) {
                /* For a privileged batch buffer that is not a wa_ctx, the
                 * bb_start_cmd_va is only updated into the ring_scan_buffer,
                 * not the real ring address allocated later in
                 * copy_workload_to_ring_buffer. Please note that
                 * shadow_ring_buffer_va points to the real ring buffer va
                 * in copy_workload_to_ring_buffer.
                 */

                if (bb->bb_offset)
                        bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
                                + bb->bb_offset;

                if (bb->ppgtt) {
                        /* for non-priv bb, scan&shadow is only for
                         * debugging purposes, so the content of the shadow bb
                         * is the same as the original bb. Therefore,
                         * here, rather than switching to the shadow bb's gma
                         * address, we directly use the original batch buffer's
                         * gma address, and send the original bb to hardware
                         * directly
                         */
                        if (bb->clflush & CLFLUSH_AFTER) {
                                drm_clflush_virt_range(bb->va,
                                                bb->obj->base.size);
                                bb->clflush &= ~CLFLUSH_AFTER;
                        }
                        i915_gem_object_finish_access(bb->obj);
                        bb->accessing = false;

                } else {
                        bb->vma = i915_gem_object_ggtt_pin(bb->obj,
                                        NULL, 0, 0, 0);
                        if (IS_ERR(bb->vma)) {
                                ret = PTR_ERR(bb->vma);
                                goto err;
                        }

                        /* relocate shadow batch buffer */
                        bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
                        if (gmadr_bytes == 8)
                                bb->bb_start_cmd_va[2] = 0;

                        /* No one is going to touch shadow bb from now on. */
                        if (bb->clflush & CLFLUSH_AFTER) {
                                drm_clflush_virt_range(bb->va,
                                                bb->obj->base.size);
                                bb->clflush &= ~CLFLUSH_AFTER;
                        }

                        ret = i915_gem_object_set_to_gtt_domain(bb->obj,
                                                                false);
                        if (ret)
                                goto err;

                        ret = i915_vma_move_to_active(bb->vma,
                                                      workload->req,
                                                      0);
                        if (ret)
                                goto err;

                        i915_gem_object_finish_access(bb->obj);
                        bb->accessing = false;
                }
        }
        return 0;
err:
        release_shadow_batch_buffer(workload);
        return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct intel_vgpu_workload *workload =
                container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
        struct i915_request *rq = workload->req;
        struct execlist_ring_context *shadow_ring_context =
                (struct execlist_ring_context *)rq->context->lrc_reg_state;

        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct i915_vma *vma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;

        if (wa_ctx->indirect_ctx.size == 0)
                return 0;

        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
         * free.
         */

        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
        return 0;
}

static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
        vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
                workload->rb_start;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_shadow_bb *bb, *pos;

        if (list_empty(&workload->shadow_bb))
                return;

        bb = list_first_entry(&workload->shadow_bb,
                        struct intel_vgpu_shadow_bb, list);

        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
                        if (bb->accessing)
                                i915_gem_object_finish_access(bb->obj);

                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);

                        if (bb->vma && !IS_ERR(bb->vma))
                                i915_vma_unpin(bb->vma);

                        i915_gem_object_put(bb->obj);
                }
                list_del(&bb->list);
                kfree(bb);
        }
}
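
/*
 * Pin the workload's shadow mm, plus any shadow mms created for PDPs
 * programmed via LRI, unpinning the already-pinned entries on failure.
 */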
static int
intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_mm *m;
        int ret = 0;

        ret = intel_vgpu_pin_mm(workload->shadow_mm);
        if (ret) {
                gvt_vgpu_err("fail to vgpu pin mm\n");
                return ret;
        }

        if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
            !workload->shadow_mm->ppgtt_mm.shadowed) {
                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
                return -EINVAL;
        }

        if (!list_empty(&workload->lri_shadow_mm)) {
                list_for_each_entry(m, &workload->lri_shadow_mm,
                                    ppgtt_mm.link) {
                        ret = intel_vgpu_pin_mm(m);
                        if (ret) {
                                list_for_each_entry_from_reverse(m,
                                                                 &workload->lri_shadow_mm,
                                                                 ppgtt_mm.link)
                                        intel_vgpu_unpin_mm(m);
                                gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
                                break;
                        }
                }
        }

        if (ret)
                intel_vgpu_unpin_mm(workload->shadow_mm);

        return ret;
}

static void
intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_mm *m;

        if (!list_empty(&workload->lri_shadow_mm)) {
                list_for_each_entry(m, &workload->lri_shadow_mm,
                                    ppgtt_mm.link)
                        intel_vgpu_unpin_mm(m);
        }
        intel_vgpu_unpin_mm(workload->shadow_mm);
}
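
/*
 * Final per-submission preparation: pin the shadow ppgtt, sync OOS pages,
 * flush post-shadow writes, copy the ring buffer into the request and pin
 * the shadow batch buffers and wa_ctx, unwinding in reverse order on
 * failure.
 */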
static int prepare_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        int ret = 0;

        ret = intel_vgpu_shadow_mm_pin(workload);
        if (ret) {
                gvt_vgpu_err("fail to pin shadow mm\n");
                return ret;
        }

        update_shadow_pdps(workload);

        set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);

        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to vgpu sync oos pages\n");
                goto err_unpin_mm;
        }

        ret = intel_vgpu_flush_post_shadow(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to flush post shadow\n");
                goto err_unpin_mm;
        }

        ret = copy_workload_to_ring_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
                goto err_shadow_batch;
        }

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto err_shadow_wa_ctx;
        }

        return 0;
err_shadow_wa_ctx:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
        release_shadow_batch_buffer(workload);
err_unpin_mm:
        intel_vgpu_shadow_mm_unpin(workload);
        return ret;
}

static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_request *rq;
        int ret;

        gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
                      workload->engine->name, workload);

        mutex_lock(&vgpu->vgpu_lock);

        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = populate_shadow_context(workload);
        if (ret) {
                release_shadow_wa_ctx(&workload->wa_ctx);
                goto out;
        }

        ret = prepare_workload(workload);
out:
        if (ret) {
                /* We might still need to add request with
                 * clean ctx to retire it properly..
                 */
                rq = fetch_and_zero(&workload->req);
                i915_request_put(rq);
        }

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
                              workload->engine->name, workload->req);
                i915_request_add(workload->req);
                workload->dispatched = true;
        }
err_req:
        if (ret)
                workload->status = ret;
        mutex_unlock(&vgpu->vgpu_lock);
        return ret;
}

static struct intel_vgpu_workload *
pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->sched_lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
                goto out;
        }

        if (!scheduler->current_vgpu->active ||
            list_empty(workload_q_head(scheduler->current_vgpu, engine)))
                goto out;

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[engine->id]) {
                workload = scheduler->current_workload[engine->id];
                gvt_dbg_sched("ring %s still have current workload %p\n",
                              engine->name, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, schedule policy routines
         * will wait until the current workload is finished when trying
         * to schedule out a vgpu.
         */
        scheduler->current_workload[engine->id] =
                list_first_entry(workload_q_head(scheduler->current_vgpu,
                                                 engine),
                                 struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[engine->id];

        gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);

        atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
        mutex_unlock(&gvt->sched_lock);
        return workload;
}

static void update_guest_pdps(struct intel_vgpu *vgpu,
                              u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_write_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}

static __maybe_unused bool
check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
{
        if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;

                if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
                        gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
                        return false;
                }
        } else {
                /* see comment in LRI handler in cmd_parser.c */
                gvt_dbg_mm("invalid shadow mm type\n");
                return false;
        }
        return true;
}
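
/*
 * Write the shadow context image back into the guest context after the
 * request has completed, including the updated ring head and, for
 * LRI-updated PPGTTs, the guest PDP roots.
 */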
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct execlist_ring_context *shadow_ring_context;
        struct intel_context *ctx = workload->req->context;
        void *context_base;
        void *src;
        unsigned long context_gpa, context_page_num;
        unsigned long gpa_base; /* first gpa of consecutive GPAs */
        unsigned long gpa_size; /* size of consecutive GPAs */
        int i;
        u32 ring_base;
        u32 head, tail;
        u16 wrap_count;

        gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
                      workload->ctx_desc.lrca);

        GEM_BUG_ON(!intel_context_is_pinned(ctx));

        head = workload->rb_head;
        tail = workload->rb_tail;
        wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

        if (tail < head) {
                if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
                        wrap_count = 0;
                else
                        wrap_count += 1;
        }

        head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;

        ring_base = rq->engine->mmio_base;
        vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
        vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;

        context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
                context_page_num = 19;

        context_base = (void *) ctx->lrc_reg_state -
                        (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

        /* find consecutive GPAs from gma until the first inconsecutive GPA.
         * write to the consecutive GPAs from src virtual address
         */
        gpa_size = 0;
        for (i = 2; i < context_page_num; i++) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                if (gpa_size == 0) {
                        gpa_base = context_gpa;
                        src = context_base + (i << I915_GTT_PAGE_SHIFT);
                } else if (context_gpa != gpa_base + gpa_size)
                        goto write;

                gpa_size += I915_GTT_PAGE_SIZE;

                if (i == context_page_num - 1)
                        goto write;

                continue;

write:
                intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
                gpa_base = context_gpa;
                gpa_size = I915_GTT_PAGE_SIZE;
                src = context_base + (i << I915_GTT_PAGE_SHIFT);
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        shadow_ring_context = (void *) ctx->lrc_reg_state;

        if (!list_empty(&workload->lri_shadow_mm)) {
                struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
                                                          struct intel_vgpu_mm,
                                                          ppgtt_mm.link);
                GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
                update_guest_pdps(vgpu, workload->ring_context_gpa,
                                  (void *)m->ppgtt_mm.guest_pdps);
        }

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
}

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
                                intel_engine_mask_t engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        intel_engine_mask_t tmp;

        /* free the unsubmitted workloads in the queues. */
        for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
                        &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        intel_vgpu_destroy_workload(pos);
                }
                clear_bit(engine->id, s->shadow_ctx_desc_updated);
        }
}
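
/*
 * Retire the current workload on a ring: wait for the shadow context to be
 * scheduled out, propagate the request status, write the context back to
 * the guest and emit pending events. On error, or during an engine reset,
 * the remaining queued workloads are cleaned up to emulate what real HW
 * would do.
 */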
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_request *rq = workload->req;
        int event;

        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&gvt->sched_lock);

        /* For the workload w/ request, needs to wait for the context
         * switch to make sure request is completed.
         * For the workload w/o request, directly complete the workload.
         */
        if (rq) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /* If this request caused a GPU hang, req->fence.error will
                 * be set to -EIO. Use -EIO as the workload status so that
                 * no context switch interrupt is triggered to the guest for
                 * a request that hung the GPU.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                if (!workload->status &&
                    !(vgpu->resetting_eng & BIT(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }

                i915_request_put(fetch_and_zero(&workload->req));
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                      ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);

        if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
                /* If workload->status is not successful, the GPU has hung
                 * or something went wrong in i915/GVT, and GVT won't inject
                 * a context switch interrupt to the guest. To the guest this
                 * error is effectively a vGPU hang, so we should emulate
                 * one: if there are pending workloads already submitted by
                 * the guest, clean them up like real HW would.
                 *
                 * If we are in the middle of an engine reset, the pending
                 * workloads won't be submitted to HW and will be cleaned up
                 * later during the reset, so doing the workload cleanup here
                 * doesn't have any impact.
                 */
                intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
        }

        workload->complete(workload);

        intel_vgpu_shadow_mm_unpin(workload);
        intel_vgpu_destroy_workload(workload);

        atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->sched_lock);
        mutex_unlock(&vgpu->vgpu_lock);
}
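
/*
 * Per-engine scheduler thread: picks workloads off the queue, dispatches
 * them to i915, waits for their completion and then completes them back to
 * the vGPU, holding a runtime PM wakeref (and forcewake on Gen9+) across
 * each dispatch.
 */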
static int workload_thread(void *arg)
{
        struct intel_engine_cs *engine = arg;
        const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
        struct intel_gvt *gvt = engine->i915->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        gvt_dbg_core("workload thread for ring %s started\n", engine->name);

        while (!kthread_should_stop()) {
                intel_wakeref_t wakeref;

                add_wait_queue(&scheduler->waitq[engine->id], &wait);
                do {
                        workload = pick_next_workload(gvt, engine);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[engine->id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
                              engine->name, workload,
                              workload->vgpu->id);

                wakeref = intel_runtime_pm_get(engine->uncore->rpm);

                gvt_dbg_sched("ring %s will dispatch workload %p\n",
                              engine->name, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(engine->uncore,
                                                   FORCEWAKE_ALL);
                /*
                 * Update the vReg of the vGPU which submitted this
                 * workload. The vGPU may use these registers for checking
                 * the context state. The value comes from GPU commands
                 * in this workload.
                 */
                update_vreg_in_ctx(workload);

                ret = dispatch_workload(workload);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring %s wait workload %p\n",
                              engine->name, workload);
                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                              workload, workload->status);

                complete_current_workload(gvt, engine->id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(engine->uncore,
                                                   FORCEWAKE_ALL);

                intel_runtime_pm_put(engine->uncore->rpm, wakeref);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
        return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&s->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                           !atomic_read(&s->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->gt, i) {
                atomic_notifier_chain_unregister(
                                        &engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->gt, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                scheduler->thread[i] = kthread_run(workload_thread, engine,
                                                   "gvt:%s", engine->name);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }

        return 0;

err:
        intel_gvt_clean_workload_scheduler(gvt);
        return ret;
}

static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
                                struct i915_ppgtt *ppgtt)
{
        int i;

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                px_dma(ppgtt->pd) = s->i915_context_pml4;
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
                        struct i915_page_directory * const pd =
                                i915_pd_entry(ppgtt->pd, i);

                        px_dma(pd) = s->i915_context_pdps[i];
                }
        }
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
        for_each_engine(engine, vgpu->gvt->gt, id)
                intel_context_unpin(s->shadow[id]);

        kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                                 intel_engine_mask_t engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        if (!s->active)
                return;

        intel_vgpu_clean_workloads(vgpu, engine_mask);
        s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
                             struct i915_ppgtt *ppgtt)
{
        int i;

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                s->i915_context_pml4 = px_dma(ppgtt->pd);
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
                        struct i915_page_directory * const pd =
                                i915_pd_entry(ppgtt->pd, i);

                        s->i915_context_pdps[i] = px_dma(pd);
                }
        }
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        struct i915_ppgtt *ppgtt;
        enum intel_engine_id i;
        int ret;

        ppgtt = i915_ppgtt_create(&i915->gt);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        i915_context_ppgtt_root_save(s, ppgtt);

        for_each_engine(engine, vgpu->gvt->gt, i) {
                struct intel_context *ce;

                INIT_LIST_HEAD(&s->workload_q_head[i]);
                s->shadow[i] = ERR_PTR(-EINVAL);

                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_shadow_ctx;
                }

                i915_vm_put(ce->vm);
                ce->vm = i915_vm_get(&ppgtt->vm);
                intel_context_set_single_submission(ce);

                /* Max ring buffer size */
                if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
                        const unsigned int ring_size = 512 * SZ_4K;

                        ce->ring = __intel_context_ring_size(ring_size);
                }

                ret = intel_context_pin(ce);
                intel_context_put(ce);
                if (ret)
                        goto out_shadow_ctx;

                s->shadow[i] = ce;
        }

        bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
                                                  sizeof(struct intel_vgpu_workload), 0,
                                                  SLAB_HWCACHE_ALIGN,
                                                  offsetof(struct intel_vgpu_workload, rb_tail),
                                                  sizeof_field(struct intel_vgpu_workload, rb_tail),
                                                  NULL);

        if (!s->workloads) {
                ret = -ENOMEM;
                goto out_shadow_ctx;
        }

        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

        memset(s->last_ctx, 0, sizeof(s->last_ctx));

        i915_vm_put(&ppgtt->vm);
        return 0;

out_shadow_ctx:
        i915_context_ppgtt_root_restore(s, ppgtt);
        for_each_engine(engine, vgpu->gvt->gt, i) {
                if (IS_ERR(s->shadow[i]))
                        break;

                intel_context_unpin(s->shadow[i]);
                intel_context_put(s->shadow[i]);
        }
        i915_vm_put(&ppgtt->vm);
        return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
                                     intel_engine_mask_t engine_mask,
                                     unsigned int interface)
{
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_vgpu_submission *s = &vgpu->submission;
        const struct intel_vgpu_submission_ops *ops[] = {
                [INTEL_VGPU_EXECLIST_SUBMISSION] =
                        &intel_vgpu_execlist_submission_ops,
        };
        int ret;

        if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
                return -EINVAL;

        if (drm_WARN_ON(&i915->drm,
                        interface == 0 && engine_mask != ALL_ENGINES))
                return -EINVAL;

        if (s->active)
                s->ops->clean(vgpu, engine_mask);

        if (interface == 0) {
                s->ops = NULL;
                s->virtual_submission_interface = 0;
                s->active = false;
                gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
                return 0;
        }

        ret = ops[interface]->init(vgpu, engine_mask);
        if (ret)
                return ret;

        s->ops = ops[interface];
        s->virtual_submission_interface = interface;
        s->active = true;

        gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
                     vgpu->id, s->ops->name);

        return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_submission *s = &workload->vgpu->submission;

        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);

        if (!list_empty(&workload->lri_shadow_mm)) {
                struct intel_vgpu_mm *m, *mm;
                list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
                                         ppgtt_mm.link) {
                        list_del(&m->ppgtt_mm.link);
                        intel_vgpu_mm_put(m);
                }
        }

        GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
        if (workload->shadow_mm)
                intel_vgpu_mm_put(workload->shadow_mm);

        kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_vgpu_workload *workload;

        workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
        if (!workload)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);
        INIT_LIST_HEAD(&workload->lri_shadow_mm);

        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);

        workload->status = -EINPROGRESS;
        workload->vgpu = vgpu;

        return workload;
}

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
                u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}
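
/*
 * Resolve the guest PPGTT root from the context descriptor's addressing
 * mode and the guest PDPs, and attach the matching shadow mm to the
 * workload.
 */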
static int prepare_mm(struct intel_vgpu_workload *workload)
{
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
        struct intel_vgpu *vgpu = workload->vgpu;
        enum intel_gvt_gtt_type root_entry_type;
        u64 pdps[GVT_RING_CTX_NR_PDPS];

        switch (desc->addressing_mode) {
        case 1: /* legacy 32-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
                break;
        case 3: /* legacy 64-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
                break;
        default:
                gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }

        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

        mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
        if (IS_ERR(mm))
                return PTR_ERR(mm);

        workload->shadow_mm = mm;
        return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @engine: the engine
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
                           const struct intel_engine_cs *engine,
                           struct execlist_ctx_descriptor_format *desc)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, engine);
        struct intel_vgpu_workload *last_workload = NULL;
        struct intel_vgpu_workload *workload = NULL;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        u32 guest_head;
        int ret;

        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
                gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return ERR_PTR(-EINVAL);
        }

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_header.val), &head, 4);

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_tail.val), &tail, 4);

        guest_head = head;

        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;

        list_for_each_entry_reverse(last_workload, q, list) {

                if (same_context(&last_workload->ctx_desc, desc)) {
                        gvt_dbg_el("ring %s cur workload == last\n",
                                   engine->name);
                        gvt_dbg_el("ctx head %x real head %lx\n", head,
                                   last_workload->rb_tail);
                        /*
                         * cannot use guest context head pointer here,
                         * as it might not be updated at this time
                         */
                        head = last_workload->rb_tail;
                        break;
                }
        }

        gvt_dbg_el("ring %s begin a new workload\n", engine->name);

        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_start.val), &start, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

        if (!intel_gvt_ggtt_validate_range(vgpu, start,
                                _RING_CTL_BUF_SIZE(ctl))) {
                gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
                return ERR_PTR(-EINVAL);
        }

        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;

        workload->engine = engine;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->guest_rb_head = guest_head;
        workload->rb_tail = tail;
        workload->rb_start = start;
        workload->rb_ctl = ctl;

        if (engine->id == RCS0) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

                workload->wa_ctx.indirect_ctx.guest_gma =
                        indirect_ctx & INDIRECT_CTX_ADDR_MASK;
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;

                if (workload->wa_ctx.indirect_ctx.size != 0) {
                        if (!intel_gvt_ggtt_validate_range(vgpu,
                                workload->wa_ctx.indirect_ctx.guest_gma,
                                workload->wa_ctx.indirect_ctx.size)) {
                                gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
                                    workload->wa_ctx.indirect_ctx.guest_gma);
                                kmem_cache_free(s->workloads, workload);
                                return ERR_PTR(-EINVAL);
                        }
                }

                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
                if (workload->wa_ctx.per_ctx.valid) {
                        if (!intel_gvt_ggtt_validate_range(vgpu,
                                workload->wa_ctx.per_ctx.guest_gma,
                                CACHELINE_BYTES)) {
                                gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
                                        workload->wa_ctx.per_ctx.guest_gma);
                                kmem_cache_free(s->workloads, workload);
                                return ERR_PTR(-EINVAL);
                        }
                }
        }

        gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
                   workload, engine->name, head, tail, start, ctl);

        ret = prepare_mm(workload);
        if (ret) {
                kmem_cache_free(s->workloads, workload);
                return ERR_PTR(ret);
        }

        /* Only scan and shadow the first workload in the queue
         * as there is only one pre-allocated buf-obj for shadow.
         */
        if (list_empty(q)) {
                intel_wakeref_t wakeref;

                with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
                        ret = intel_gvt_scan_and_shadow_workload(workload);
        }

        if (ret) {
                if (vgpu_is_vm_unhealthy(ret))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }

        return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
        list_add_tail(&workload->list,
                      workload_q_head(workload->vgpu, workload->engine));
        intel_gvt_kick_schedule(workload->vgpu->gvt);
        wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}