ppgtt_set_shadow_entry(spt, se, index);
return 0;
err:
- /* Cancel the existing addess mappings of DMA addr. */
+ /* Cancel the existing address mappings of DMA addr. */
for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(sub_spt, &sub_se);
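This first hunk appears to come from the error path of the 2MB-entry split in gvt/gtt.c: when shadowing a 2MB guest entry as 512 4K sub-entries fails partway through, every sub-entry that was already made present must be invalidated again before the shadow page table is released. The sketch below is a minimal user-space analogue of that unwind pattern, not the kernel code itself; struct sub_entry, invalidate_pte() and split_large_entry() are illustrative stand-ins for the real GVT types.

#include <stdbool.h>
#include <stdio.h>

#define NR_SUB_ENTRIES 512	/* a 2MB entry splits into 512 4K entries */

struct sub_entry {
	bool present;
	unsigned long dma_addr;
};

static void invalidate_pte(struct sub_entry *e)
{
	printf("invalidate 4K entry, dma_addr=%#lx\n", e->dma_addr);
	e->present = false;
}

static int split_large_entry(struct sub_entry table[NR_SUB_ENTRIES])
{
	for (int i = 0; i < NR_SUB_ENTRIES; i++) {
		if (i == 100)		/* simulate a mapping failure */
			goto err;
		table[i].dma_addr = 0x1000UL * i;
		table[i].present = true;
	}
	return 0;

err:
	/* Cancel the existing address mappings: unwind what succeeded. */
	for (int i = 0; i < NR_SUB_ENTRIES; i++)
		if (table[i].present)
			invalidate_pte(&table[i]);
	return -1;
}

static struct sub_entry table[NR_SUB_ENTRIES];

int main(void)
{
	return split_large_entry(table) ? 1 : 0;
}

The next hunk switches files, to the SWSCI opregion request handling.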
gvt_vgpu_err("requesting SMI service\n");
return 0;
}
- /* ignore non 0->1 trasitions */
+ /* ignore non 0->1 transitions */
if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
& SWSCI_SCI_TRIGGER) ||
!(swsci & SWSCI_SCI_TRIGGER)) {
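The condition above implements 0->1 edge detection on the SCI trigger bit: the request is ignored if the bit was already set in config space, or if the new write does not set it. A compact way to read that logic, as a hedged sketch with an illustrative SCI_TRIGGER_BIT standing in for the real SWSCI_SCI_TRIGGER definition:

#include <stdbool.h>
#include <stdint.h>

#define SCI_TRIGGER_BIT 0x1u	/* stand-in for SWSCI_SCI_TRIGGER */

/* Fires only on a 0->1 transition of the trigger bit. */
static bool sci_trigger_edge(uint32_t old_val, uint32_t new_val)
{
	bool was_set = old_val & SCI_TRIGGER_BIT;
	bool is_set = new_val & SCI_TRIGGER_BIT;

	/* 1->1, 1->0 and 0->0 are all ignored; only 0->1 fires. */
	return !was_set && is_set;
}

By De Morgan, the kernel's "ignore" guard, (old & TRIGGER) || !(new & TRIGGER), is exactly the negation of this !was_set && is_set edge test. The next hunk moves to the tracked-page write handler.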
return -ENXIO;
if (unlikely(vgpu->failsafe)) {
- /* Remove write protection to prevent furture traps. */
+ /* Remove write protection to prevent future traps. */
intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
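This hunk sits in the tracked-page write path (gvt/page_track.c): in failsafe mode the write protection is simply dropped so the page stops trapping, otherwise the write is forwarded to the per-page handler callback. Below is a minimal sketch of that dispatch, assuming an illustrative struct page_track with a handler pointer rather than the real GVT structures:

#include <stdbool.h>
#include <stdint.h>

struct page_track {
	int (*handler)(struct page_track *t, uint64_t gpa,
		       void *data, unsigned int bytes);
	bool write_protected;
};

static int handle_tracked_write(struct page_track *t, bool failsafe,
				uint64_t gpa, void *data, unsigned int bytes)
{
	if (failsafe) {
		/* Remove write protection to prevent future traps. */
		t->write_protected = false;
		return 0;
	}
	/* Normal path: let the registered handler emulate the write. */
	return t->handler(t, gpa, data, bytes);
}

The final hunk is in the workload-queue cleanup in gvt/scheduler.c.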
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
- /* free the unsubmited workloads in the queues. */
+ /* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
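list_for_each_entry_safe() is the kernel's deletion-safe list iterator: it caches the successor in n before the loop body runs, so pos can be unlinked and destroyed mid-walk without breaking the traversal. A user-space analogue of the same pattern, with an illustrative singly linked struct workload in place of the kernel's list_head machinery:

#include <stdlib.h>

struct workload {
	struct workload *next;
	/* ... payload elided ... */
};

/*
 * Free every queued-but-unsubmitted workload; safe because the
 * successor is saved before the current node is released.
 */
static void free_unsubmitted(struct workload **head)
{
	struct workload *pos = *head, *n;

	while (pos) {
		n = pos->next;	/* save the successor first */
		free(pos);	/* now the current node can go */
		pos = n;
	}
	*head = NULL;
}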