From: Claudio Imbrenda Date: Thu, 26 Mar 2026 13:17:14 +0000 (+0100) Subject: KVM: s390: Correctly handle guest mappings without struct page X-Git-Tag: v7.0-rc6~5^2~2^2~5 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0f2b760a17126cb7940d410c99edfa14e928554c;p=thirdparty%2Flinux.git KVM: s390: Correctly handle guest mappings without struct page Introduce a new special softbit for large pages, like already present for normal pages, and use it to mark guest mappings that do not have struct pages. Whenever a leaf DAT entry becomes dirty, check the special softbit and only call SetPageDirty() if there is an actual struct page. Move the logic to mark pages dirty inside _gmap_ptep_xchg() and _gmap_crstep_xchg_atomic(), to avoid needlessly duplicating the code. Fixes: 5a74e3d93417 ("KVM: s390: KVM-specific bitfields and helper functions") Fixes: a2c17f9270cc ("KVM: s390: New gmap code") Reviewed-by: Christian Borntraeger Signed-off-by: Claudio Imbrenda --- diff --git a/arch/s390/kvm/dat.h b/arch/s390/kvm/dat.h index efedcf96110c..874cc962e196 100644 --- a/arch/s390/kvm/dat.h +++ b/arch/s390/kvm/dat.h @@ -160,14 +160,14 @@ union pmd { unsigned long :44; /* HW */ unsigned long : 3; /* Unused */ unsigned long : 1; /* HW */ + unsigned long s : 1; /* Special */ unsigned long w : 1; /* Writable soft-bit */ unsigned long r : 1; /* Readable soft-bit */ unsigned long d : 1; /* Dirty */ unsigned long y : 1; /* Young */ - unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long : 3; /* HW */ + unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long vsie_notif : 1; /* Referenced in a shadow table */ - unsigned long : 1; /* Unused */ unsigned long : 4; /* HW */ unsigned long sd : 1; /* Soft-Dirty */ unsigned long pr : 1; /* Present */ @@ -183,14 +183,14 @@ union pud { unsigned long :33; /* HW */ unsigned long :14; /* Unused */ unsigned long : 1; /* HW */ + unsigned long s : 1; /* Special */ 
unsigned long w : 1; /* Writable soft-bit */ unsigned long r : 1; /* Readable soft-bit */ unsigned long d : 1; /* Dirty */ unsigned long y : 1; /* Young */ - unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long : 3; /* HW */ + unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long vsie_notif : 1; /* Referenced in a shadow table */ - unsigned long : 1; /* Unused */ unsigned long : 4; /* HW */ unsigned long sd : 1; /* Soft-Dirty */ unsigned long pr : 1; /* Present */ @@ -254,14 +254,14 @@ union crste { struct { unsigned long :47; unsigned long : 1; /* HW (should be 0) */ + unsigned long s : 1; /* Special */ unsigned long w : 1; /* Writable */ unsigned long r : 1; /* Readable */ unsigned long d : 1; /* Dirty */ unsigned long y : 1; /* Young */ - unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long : 3; /* HW */ + unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */ unsigned long vsie_notif : 1; /* Referenced in a shadow table */ - unsigned long : 1; unsigned long : 4; /* HW */ unsigned long sd : 1; /* Soft-Dirty */ unsigned long pr : 1; /* Present */ diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c index 03e15b5e0b9a..c8b79ad04ac9 100644 --- a/arch/s390/kvm/gmap.c +++ b/arch/s390/kvm/gmap.c @@ -519,7 +519,7 @@ void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end) _dat_walk_gfn_range(start, end, gmap->asce, &walk_ops, 0, gmap); } -static int gmap_handle_minor_crste_fault(union asce asce, struct guest_fault *f) +static int gmap_handle_minor_crste_fault(struct gmap *gmap, struct guest_fault *f) { union crste newcrste, oldcrste = READ_ONCE(*f->crstep); @@ -544,10 +544,8 @@ static int gmap_handle_minor_crste_fault(union asce asce, struct guest_fault *f) newcrste.s.fc1.d = 1; newcrste.s.fc1.sd = 1; } - if (!oldcrste.s.fc1.d && newcrste.s.fc1.d) - SetPageDirty(phys_to_page(crste_origin_large(newcrste))); /* In case of 
races, let the slow path deal with it. */ - return !dat_crstep_xchg_atomic(f->crstep, oldcrste, newcrste, f->gfn, asce); + return !gmap_crstep_xchg_atomic(gmap, f->crstep, oldcrste, newcrste, f->gfn); } /* Trying to write on a read-only page, let the slow path deal with it. */ return 1; @@ -576,8 +574,6 @@ static int _gmap_handle_minor_pte_fault(struct gmap *gmap, union pgste *pgste, newpte.s.d = 1; newpte.s.sd = 1; } - if (!oldpte.s.d && newpte.s.d) - SetPageDirty(pfn_to_page(newpte.h.pfra)); *pgste = gmap_ptep_xchg(gmap, f->ptep, newpte, *pgste, f->gfn); return 0; @@ -614,7 +610,7 @@ int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault) fault->callback(fault); pgste_set_unlock(fault->ptep, pgste); } else { - rc = gmap_handle_minor_crste_fault(gmap->asce, fault); + rc = gmap_handle_minor_crste_fault(gmap, fault); if (!rc && fault->callback) fault->callback(fault); } @@ -669,6 +665,7 @@ static int _gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, int leve oldval = READ_ONCE(*f->crstep); newval = _crste_fc1(f->pfn, oldval.h.tt, f->writable, f->write_attempt | oldval.s.fc1.d); + newval.s.fc1.s = !f->page; newval.s.fc1.sd = oldval.s.fc1.sd; if (oldval.val != _CRSTE_EMPTY(oldval.h.tt).val && crste_origin_large(oldval) != crste_origin_large(newval)) diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h index 150e91e15ee0..579399ef5480 100644 --- a/arch/s390/kvm/gmap.h +++ b/arch/s390/kvm/gmap.h @@ -185,6 +185,8 @@ static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, un else _gmap_handle_vsie_unshadow_event(gmap, gfn); } + if (!ptep->s.d && newpte.s.d && !newpte.s.s) + SetPageDirty(pfn_to_page(newpte.h.pfra)); return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap)); } @@ -220,6 +222,8 @@ static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, unio else _gmap_handle_vsie_unshadow_event(gmap, gfn); } + if (!oldcrste.s.fc1.d && newcrste.s.fc1.d && !newcrste.s.fc1.s) + 
SetPageDirty(phys_to_page(crste_origin_large(newcrste))); return dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, gmap->asce); }