/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 */

#include "i915_drv.h"
#include "gvt.h"
/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given guest physical address.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}
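/*
 * The translation relies on BAR0 (GTTMMIO) in the vGPU's virtual config
 * space: subtracting the guest-programmed BAR0 base (low flag bits masked
 * off) from the trapped GPA yields the register offset. For example, with
 * BAR0 programmed to 0xf0000000, an access at GPA 0xf0002000 maps to MMIO
 * offset 0x2000.
 */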
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
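/*
 * Within BAR0 the MMIO registers occupy [0, mmio_size) and the virtual GGTT
 * occupies [gtt_start_offset, gtt_start_offset + gvt_ggtt_sz(gvt)), so the
 * two macros above classify a translated offset as either a register access
 * or a GGTT entry access.
 */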
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&gvt->lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		/* plain register access: use the default vreg-backed handlers */
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
					bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
					bytes);
	} else if (reg_is_gtt(gvt, offset) &&
			vgpu->gtt.ggtt_mm->virtual_page_table) {
		/* GGTT access: read/write the virtual page table directly */
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;

		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);

	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		/* Since we enter the failsafe mode early during guest boot,
		 * the guest may not have had a chance to set up its ppgtt
		 * table, so there should not be any write-protected pages
		 * for the guest. Keep the wp related code here in case we
		 * need to handle it in the future.
		 */
		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			/* remove write protection to prevent future traps */
			intel_vgpu_clean_guest_page(vgpu, gp);
			if (read)
				intel_gvt_hypervisor_read_gpa(vgpu, pa,
						p_data, bytes);
			else
				intel_gvt_hypervisor_write_gpa(vgpu, pa,
						p_data, bytes);
		}
	}
	mutex_unlock(&gvt->lock);
}
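/*
 * Note on failsafe mode: once a vGPU is in failsafe mode, MMIO accesses are
 * no longer dispatched to per-register handlers; register reads and writes
 * fall back to the default vreg-backed handlers above and GGTT accesses are
 * serviced straight from the virtual page table, which keeps the guest
 * running without further emulation side effects.
 */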
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	/* in failsafe mode, service the access without per-register handlers */
	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}
	mutex_lock(&gvt->lock);

	/* reads that hit a write-protected guest page go straight to guest memory */
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_vgpu_err("guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					ret, gp->gfn, pa, *(u32 *)p_data,
					bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else {
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
				offset, bytes, *(u32 *)p_data);

			if (offset == 0x206c) {
				gvt_vgpu_err("------------------------------------------\n");
				gvt_vgpu_err("likely triggers a gfx reset\n");
				gvt_vgpu_err("------------------------------------------\n");
				vgpu->mmio.disable_warn_untrack = true;
			}
		}
	}

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
			offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
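/*
 * Illustration only (the caller shown here is hypothetical): a hypervisor
 * backend that traps a 4-byte guest read at physical address pa would
 * typically forward it as
 *
 *	u32 val;
 *	int ret = intel_vgpu_emulate_mmio_read(vgpu, pa, &val, 4);
 *
 * and, when ret is zero, return val to the vCPU that faulted.
 */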
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	/* in failsafe mode, service the access without per-register handlers */
	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&gvt->lock);

	/* writes that hit a write-protected guest page go to the page's handler */
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, "
					"var 0x%x, len %d\n",
					ret, gp->gfn, pa,
					*(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask) {
			ret = mmio->write(vgpu, offset, p_data, bytes);
		} else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_vgpu_err("try to write RO reg %x\n",
						offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}

		/* higher 16 bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
			bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
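/*
 * Worked example for the mode-control mask handling above (values are
 * illustrative): if the guest writes 0x00010001 to a register flagged by
 * intel_gvt_mmio_has_mode_mask(), the upper 16 bits (0x0001) select which of
 * the lower 16 bits may change, so only bit 0 of the previous vreg/sreg
 * value is replaced and every other bit keeps its old value.
 */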
/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set bits 0:2 (Core C-State) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

	vgpu->mmio.disable_warn_untrack = false;
}
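/*
 * Both vreg and sreg are reloaded from the same gvt->firmware.mmio snapshot,
 * and disable_warn_untrack is re-armed so warnings about untracked MMIO
 * reads fire again after a reset.
 */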
/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu);

	return 0;
}
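/*
 * vreg and sreg share a single vzalloc()'d buffer of 2 * info->mmio_size;
 * sreg starts at offset mmio_size inside the vreg allocation, which is why
 * intel_vgpu_clean_mmio() below only needs to vfree() vreg to release both
 * register spaces.
 */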
/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}