/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}

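/*
 * The hardware walks GVT's shadow page tables rather than the guest's own,
 * so once the shadow mm is pinned, the PDP root pointers in the shadow ring
 * context image are rewritten from ppgtt_mm.shadow_pdps before submission.
 * set_context_pdp_root_pointer() stores them in reverse (pdp[7 - i]) order,
 * which appears to match how the PDP register pairs are laid out in the
 * execlist ring context.
 */
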
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
	kunmap(page);
}

/*
 * When populating the shadow context from the guest, we should not override
 * OA-related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		       u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->ring_id != RCS0)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

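/*
 * populate_shadow_context() pulls the guest's engine context image into the
 * shadow context backing the i915 request: selected registers are copied
 * first (ctx_ctrl with its upper mask bits forced on), then the remainder of
 * the first state page, and, unless the guest context is marked
 * RESTORE_INHIBIT, every remaining context page is read from guest memory
 * as well.
 */
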
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS0) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);

	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) {
		kunmap(page);
		return 0;
	}

	kunmap(page);

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
		context_page_num = 19;

	i = 2;
	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}
	return 0;
}

static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->gem_context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

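/*
 * This notifier runs on i915's per-engine context_status_notifier chain.
 * Scheduling in a non-GVT request while a vGPU still owns the engine
 * switches the engine MMIO state back to the host; for GVT requests it
 * switches MMIO to the workload's vGPU on schedule-in and saves the ring
 * hardware state on schedule-out or preemption, with shadow_ctx_active
 * tracking whether the shadow context is currently on the hardware.
 */
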
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void
shadow_context_descriptor_update(struct intel_context *ce,
				 struct intel_vgpu_workload *workload)
{
	u64 desc = ce->lrc_desc;

	/*
	 * Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

	desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	desc |= workload->ctx_desc.addressing_mode <<
		GEN8_CTX_ADDRESSING_MODE_SHIFT;

	ce->lrc_desc = desc;
}

static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;
	int err;

	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/*
	 * To track whether a request has started on HW, we can emit a
	 * breadcrumb at the beginning of the request and check its
	 * timeline's HWSP to see if the breadcrumb has advanced past the
	 * start of this request. Actually, the request must have the
	 * init_breadcrumb if its timeline has set has_init_breadcrumb, or
	 * the scheduler might get a wrong state of it during reset. Since
	 * the requests from gvt always set the has_init_breadcrumb flag,
	 * here we need to do emit_init_breadcrumb for all the requests.
	 */
	if (req->engine->emit_init_breadcrumb) {
		err = req->engine->emit_init_breadcrumb(req);
		if (err) {
			gvt_vgpu_err("fail to emit init breadcrumb\n");
			return err;
		}
	}

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}

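/*
 * Point the i915 context's PPGTT at the shadow page tables: a 4-level mm
 * supplies a single PML4 root (shadow_pdps[0]), while a 3-level mm fills in
 * all GVT_RING_CTX_NR_PDPS page-directory entries.
 */
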
static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
					 struct i915_gem_context *ctx)
{
	struct intel_vgpu_mm *mm = workload->shadow_mm;
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
	int i = 0;

	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
		return -EINVAL;

	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
	} else {
		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
		}
	}

	return 0;
}

static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct i915_request *rq;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->req)
		return 0;

	rq = i915_request_create(s->shadow[workload->ring_id]);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		return PTR_ERR(rq);
	}

	workload->req = i915_request_get(rq);
	return 0;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and context.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadow)
		return 0;

	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(s->shadow[workload->ring_id],
						 workload);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		return ret;

	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	workload->shadow = true;
	return 0;
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

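/*
 * Pin each shadow batch buffer into the GGTT and relocate the
 * MI_BATCH_BUFFER_START command in the shadow ring buffer to point at it.
 * Batch buffers that execute from PPGTT space are left at the guest's
 * original address, since their shadow copy exists only for scan/debug
 * purposes.
 */
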
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/*
		 * For privileged batch buffers (not wa_ctx), bb_start_cmd_va
		 * points into ring_scan_buffer, not the real ring address
		 * allocated later in copy_workload_to_ring_buffer. Please
		 * note that shadow_ring_buffer_va points to the real ring
		 * buffer va once copy_workload_to_ring_buffer has run.
		 */
		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/*
			 * For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, rather
			 * than switching to the shadow bb's gma address, we
			 * directly use the original batch buffer's gma
			 * address and send the original bb to hardware.
			 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
		} else {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
					NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
								false);
			if (ret)
				goto err;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;

			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
		}
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

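/*
 * prepare_shadow_wa_ctx() pins the shadow indirect-context object in the
 * GGTT (CACHELINE_BYTES alignment) and patches the resulting shadow GMAs
 * into the shadow ring context. The per-ctx address is read from just past
 * the indirect context in the same mapping, apparently left there by the
 * earlier scan/shadow pass, and that scratch area is then cleared.
 */
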
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_object_finish_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			i915_gem_object_put(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}

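/*
 * dispatch_workload() drives one submission end to end under both the
 * vgpu_lock and struct_mutex: swap the shadow PPGTT roots into the i915
 * context, allocate the request, scan/shadow and populate the context, then
 * prepare_workload(). On failure the half-built request is still dropped via
 * fetch_and_zero()/i915_request_put() so it can be retired cleanly.
 */
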
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq;
	int ring_id = workload->ring_id;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = set_context_ppgtt_from_shadow(workload,
					    s->shadow[ring_id]->gem_context);
	if (ret < 0) {
		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
		goto err_req;
	}

	ret = intel_gvt_workload_req_alloc(workload);
	if (ret)
		goto err_req;

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = populate_shadow_context(workload);
	if (ret) {
		release_shadow_wa_ctx(&workload->wa_ctx);
		goto out;
	}

	ret = prepare_workload(workload);
out:
	if (ret) {
		/* We might still need to add request with
		 * clean ctx to retire it properly..
		 */
		rq = fetch_and_zero(&workload->req);
		i915_request_put(rq);
	}

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}
err_req:
	if (ret)
		workload->status = ret;
	mutex_unlock(&dev_priv->drm.struct_mutex);
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (!scheduler->current_vgpu->active ||
	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as current workload
	 * once current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}

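/*
 * Write the shadow context back to the guest when a workload completes: the
 * guest's ring head/tail vregs are updated (guest_rb_head carries a wrap
 * count above RB_HEAD_WRAP_CNT_OFF, bumped whenever the new tail has wrapped
 * past the old head), and the context pages plus selected ring-context
 * registers are copied back into guest memory.
 */
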
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	u32 ring_base;
	u32 head, tail;
	u16 wrap_count;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	head = workload->rb_head;
	tail = workload->rb_tail;
	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

	if (tail < head) {
		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
			wrap_count = 0;
		else
			wrap_count += 1;
	}

	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;

	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	intel_engine_mask_t tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that
		 * no context switch interrupt is injected into the guest
		 * for a hung request.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status &&
		    !(vgpu->resetting_eng & BIT(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* If workload->status is not successful, the HW GPU hung
		 * or something went wrong with i915/GVT, and GVT won't
		 * inject a context switch interrupt into the guest. So to
		 * the guest this error is effectively a vGPU hang, and we
		 * should emulate it as such. If there are pending workloads
		 * already submitted by the guest, we should clean them up
		 * the way the HW GPU would.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up later during the reset, so the workload
		 * cleanup here doesn't have any impact.
		 */
		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

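/*
 * One workload_thread instance runs per engine, created in
 * intel_gvt_init_workload_scheduler() below. Each iteration it sleeps until
 * pick_next_workload() yields something, dispatches it (holding forcewake on
 * Gen9+), waits for the request to complete, and then finishes it via
 * complete_current_workload().
 */
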
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
					FORCEWAKE_ALL);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
					FORCEWAKE_ALL);

		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
				struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		px_dma(ppgtt->pd) = s->i915_context_pml4;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			px_dma(pd) = s->i915_context_pdps[i];
		}
	}
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
	for_each_engine(engine, vgpu->gvt->dev_priv, id)
		intel_context_unpin(s->shadow[id]);

	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	intel_vgpu_clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
			     struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		s->i915_context_pml4 = px_dma(ppgtt->pd);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			s->i915_context_pdps[i] = px_dma(pd);
		}
	}
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id i;
	int ret;

	ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		struct intel_context *ce;

		INIT_LIST_HEAD(&s->workload_q_head[i]);
		s->shadow[i] = ERR_PTR(-EINVAL);

		ce = i915_gem_context_get_engine(ctx, i);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_shadow_ctx;
		}

		ret = intel_context_pin(ce);
		intel_context_put(ce);
		if (ret)
			goto out_shadow_ctx;

		s->shadow[i] = ce;
	}

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	i915_gem_context_put(ctx);
	return 0;

out_shadow_ctx:
	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (IS_ERR(s->shadow[i]))
			break;

		intel_context_unpin(s->shadow[i]);
	}
	i915_gem_context_put(ctx);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
		     vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

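/*
 * The context descriptor's addressing mode selects the page-table shape:
 * legacy 32-bit (mode 1) maps to a 3-level PPGTT rooted in four PDPs,
 * legacy 64-bit (mode 3) to a 4-level PPGTT rooted in a PML4; advanced
 * contexts (SVM) are rejected. The guest PDPs read above seed the shadow mm
 * lookup.
 */
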
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	enum intel_gvt_gtt_type root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))

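/*
 * same_context() treats two descriptors with matching context_id and LRCA
 * as one guest context, which lets intel_vgpu_create_workload() below
 * detect that a new submission continues the previous workload and take the
 * ring head from last_workload->rb_tail instead of the possibly stale guest
 * context image.
 */
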
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	u32 guest_head;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	guest_head = head;

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->guest_rb_head = guest_head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS0) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(&dev_priv->runtime_pm);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	}

	if (ret) {
		if (vgpu_is_vm_unhealthy(ret))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}