scoped_guard(read_lock, &kvm->mmu_lock) {
if (!mmu_invalidate_retry_gfn(kvm, inv_seq, f->gfn)) {
f->valid = true;
- rc = gmap_link(mc, kvm->arch.gmap, f);
+ rc = gmap_link(mc, kvm->arch.gmap, f, slot);
kvm_release_faultin_page(kvm, f->page, !!rc, f->write_attempt);
f->page = NULL;
}
return rc;
}
-static inline bool gmap_2g_allowed(struct gmap *gmap, gfn_t gfn)
+/**
+ * gmap_2g_allowed() - Check whether a 2G hugepage is allowed.
+ * @gmap: The gmap of the guest.
+ * @f: Describes the fault that is being resolved.
+ * @slot: The memslot the faulting address belongs to.
+ *
+ * Counterpart of gmap_1m_allowed() for region-third-table (2G) mappings.
+ * NOTE(review): always returns false — presumably a placeholder until 2G
+ * hugepage backing is implemented; confirm with the series this belongs to.
+ *
+ * Return: Always false; a 2G hugepage is never allowed to back the faulting
+ * address.
+ */
+static inline bool gmap_2g_allowed(struct gmap *gmap, struct guest_fault *f,
+ struct kvm_memory_slot *slot)
{
return false;
}
-static inline bool gmap_1m_allowed(struct gmap *gmap, gfn_t gfn)
+/**
+ * gmap_1m_allowed() - Check whether a 1M hugepage is allowed.
+ * @gmap: The gmap of the guest.
+ * @f: Describes the fault that is being resolved.
+ * @slot: The memslot the faulting address belongs to.
+ *
+ * All three of the following conditions must hold: the
+ * GMAP_FLAG_ALLOW_HPAGE_1M flag must be set for @gmap; the offset of the
+ * faulting address within its 1M virtual frame must equal the offset of the
+ * backing page within its 1M physical frame (@f->gfn and @f->pfn congruent
+ * modulo the 1M frame, so that one segment-level mapping can cover the
+ * page); and the whole 1M frame containing @f->gfn must lie entirely inside
+ * @slot ([base_gfn, base_gfn + npages) covers the aligned-down start and the
+ * aligned-up end of the frame).
+ *
+ * Return: true if a 1M hugepage is allowed to back the faulting address, false
+ * otherwise.
+ */
+static inline bool gmap_1m_allowed(struct gmap *gmap, struct guest_fault *f,
+ struct kvm_memory_slot *slot)
{
- return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags);
+ return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags) &&
+ !((f->gfn ^ f->pfn) & ~_SEGMENT_FR_MASK) &&
+ slot->base_gfn <= ALIGN_DOWN(f->gfn, _PAGES_PER_SEGMENT) &&
+ slot->base_gfn + slot->npages >= ALIGN(f->gfn + 1, _PAGES_PER_SEGMENT);
}
-int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f)
+int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f,
+ struct kvm_memory_slot *slot)
{
unsigned int order;
int rc, level;
level = TABLE_TYPE_PAGE_TABLE;
if (f->page) {
order = folio_order(page_folio(f->page));
- if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f->gfn))
+ if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f, slot))
level = TABLE_TYPE_REGION3;
- else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f->gfn))
+ else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f, slot))
level = TABLE_TYPE_SEGMENT;
}
rc = dat_link(mc, gmap->asce, level, uses_skeys(gmap), f);
struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
void gmap_remove_child(struct gmap *child);
void gmap_dispose(struct gmap *gmap);
-int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault);
+int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault,
+ struct kvm_memory_slot *slot);
void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
int gmap_set_limit(struct gmap *gmap, gfn_t limit);
int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);