	__shadow_walk_next(iterator, *iterator->sptep);
}
+/*
+ * Note: while normally KVM uses a "bool flush" return value to let
+ * the caller batch flushes, __link_shadow_page() flushes immediately
+ * before populating the parent PTE with the new shadow page. The
+ * typical callers, direct_map() and FNAME(fetch)(), are not going
+ * to zap more than one huge SPTE anyway.
+ *
+ * The only exception, where @flush can be false, is when a huge SPTE
+ * is replaced with a non-leaf SPTE pointing to an already fully
+ * populated page table, as shadow_mmu_split_huge_page() does.  In that
+ * case no mapping is removed by the change to the page tables, so no
+ * immediate flush is needed for correctness.
+ *
+ * Even in that case, the zaps are not batched: kvm_mmu_commit_zap_page()
+ * is called immediately, because deferring it would require passing an
+ * invalid_list argument all the way down to __walk_slot_rmaps().
+ */
static void __link_shadow_page(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache, u64 *sptep,
struct kvm_mmu_page *sp, bool flush)
		parent_sp = sptep_to_sp(sptep);
		WARN_ON_ONCE(parent_sp->role.level == PG_LEVEL_4K);
-		mmu_page_zap_pte(kvm, parent_sp, sptep, &invalid_list);
-		kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, true);
+		if (mmu_page_zap_pte(kvm, parent_sp, sptep, &invalid_list))
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+		else if (flush)
+			kvm_flush_remote_tlbs_sptep(kvm, sptep);
	}

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
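
For readers following the TLB-flush reasoning in the comment above without the rest of mmu.c in view, here is a minimal, self-contained userspace sketch of the decision the new hunk makes. It is an illustration only: the *_stub helpers and link_shadow_page_flush_logic() are invented for this example and are not KVM APIs; the real code operates on SPTEs and an invalid_list rather than plain booleans.

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for mmu_page_zap_pte(): returns true when a child
 * shadow page was queued on invalid_list and must be committed. */
static bool zap_pte_stub(bool child_queued)
{
	return child_queued;
}

/* Stub standing in for kvm_mmu_commit_zap_page(), which also flushes. */
static void commit_zap_stub(void)
{
	puts("commit zap of queued shadow pages (remote TLBs flushed)");
}

/* Stub standing in for kvm_flush_remote_tlbs_sptep(). */
static void flush_sptep_stub(void)
{
	puts("flush remote TLBs for this SPTE only");
}

/* Mirrors the control flow of the replaced hunk in __link_shadow_page(). */
static void link_shadow_page_flush_logic(bool child_queued, bool flush)
{
	if (zap_pte_stub(child_queued))
		commit_zap_stub();	/* committing the zap flushes anyway */
	else if (flush)
		flush_sptep_stub();	/* a huge SPTE was dropped: flush now */
	else
		puts("no flush: eager split, no mapping was removed");
}

int main(void)
{
	link_shadow_page_flush_logic(false, true);	/* direct_map()/FNAME(fetch)() style caller */
	link_shadow_page_flush_logic(false, false);	/* shadow_mmu_split_huge_page() style caller */
	link_shadow_page_flush_logic(true, false);	/* a nested TDP child page was zapped */
	return 0;
}

The design point the hunk encodes is that kvm_mmu_commit_zap_page() already issues the remote TLB flush when pages were actually zapped, so the old unconditional kvm_mmu_remote_flush_or_zap(..., true) added a flush even on the eager-split path where nothing is unmapped; with the change, only the plain huge-SPTE drop pays for an explicit kvm_flush_remote_tlbs_sptep() call.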