* The shadow table will be removed automatically on any change to the
* PTE mapping for the source table.
*
+ * The shadow gmap is returned with one extra reference, which the caller must drop with gmap_put().
+ *
* Return: A guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
* ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
* parent gmap table could not be protected.
	struct gmap *sg, *new;
	int rc;

-	scoped_guard(spinlock, &parent->children_lock)
+	scoped_guard(spinlock, &parent->children_lock) {
		sg = gmap_find_shadow(parent, asce, edat_level);
-	if (sg)
-		return sg;
+		if (sg) {
+			gmap_get(sg);
+			return sg;
+		}
+	}
	/* Create a new shadow gmap. */
	new = gmap_new(parent->kvm, asce.r ? 1UL << (64 - PAGE_SHIFT) : asce_end(asce));
	if (!new)
		return ERR_PTR(-ENOMEM);
	scoped_guard(spinlock, &parent->children_lock) {
		/* Check again, another CPU might have created the same shadow in the meantime. */
		sg = gmap_find_shadow(parent, asce, edat_level);
		if (sg) {
			gmap_put(new);
+			gmap_get(sg);
			return sg;
		}
		if (asce.r) {
			gmap_add_child(parent, new);
			/* Nothing to protect, return right away. */
+			gmap_get(new);	/* extra reference for the caller */
			return new;
		}
	}
+	gmap_get(new);	/* extra reference for the caller */
	new->parent = parent;
	/* Protect while inserting, to guard against invalidation races. */
	rc = gmap_protect_asce_top_level(mc, new);
	if (rc) {
		new->parent = NULL;
		gmap_put(new);
+		gmap_put(new);	/* also drop the extra reference meant for the caller */
		return ERR_PTR(rc);
	}
	return new;
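
With this, gmap_create_shadow() hands every successful caller its own reference: the fast path, the recheck path and the creation path all take gmap_get() before returning, and the error path drops the extra reference along with the creation reference. A minimal caller sketch based only on the contract documented above; the wrapper name get_shadow_with_retry() and its signature are hypothetical, while gmap_create_shadow(), gmap_put() and the -EAGAIN retry rule are taken from this patch:

	/* Hypothetical wrapper: illustrates the reference and retry contract only. */
	static struct gmap *get_shadow_with_retry(struct kvm_vcpu *vcpu,
						  union asce asce, int edat)
	{
		struct gmap *sg;

		do {
			sg = gmap_create_shadow(vcpu->arch.mc, vcpu->kvm->arch.gmap,
						asce, edat);
		} while (sg == ERR_PTR(-EAGAIN));	/* documented retry case */
		/* On success the caller owns one reference; drop it with gmap_put(). */
		return sg;
	}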
		release_gmap_shadow(vsie_page);
	}
}
+again:
	gmap = gmap_create_shadow(vcpu->arch.mc, vcpu->kvm->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return gmap;
	/* unlikely race condition, remove the previous shadow */
	if (vsie_page->gmap_cache.gmap)
		release_gmap_shadow(vsie_page);
+	if (!gmap->parent) {
+		/* The new shadow was already invalidated again: drop it and retry. */
+		gmap_put(gmap);
+		goto again;
+	}
	vcpu->kvm->stat.gmap_shadow_create++;
	list_add(&vsie_page->gmap_cache.list, &gmap->scb_users);
	vsie_page->gmap_cache.gmap = gmap;
	prefix_unmapped(vsie_page);
-	gmap_get(gmap);
	}
	return gmap;
}
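
The gmap_get() deleted at the end is the crux of the caller-side change: the reference it used to take is now the one already returned by gmap_create_shadow(), so the cache ends up holding exactly one reference either way. The release path must then drop that same reference. A minimal sketch of that counterpart, assuming release_gmap_shadow() (whose body is not shown in this excerpt) simply unlinks the scb_users entry and puts the cached gmap; the real function may do more:

	/* Sketch only; the actual release_gmap_shadow() is not part of this excerpt. */
	static void release_gmap_shadow(struct vsie_page *vsie_page)
	{
		if (!vsie_page->gmap_cache.gmap)
			return;
		list_del(&vsie_page->gmap_cache.list);	/* undo the scb_users list_add() */
		gmap_put(vsie_page->gmap_cache.gmap);	/* drop the cache's reference */
		vsie_page->gmap_cache.gmap = NULL;
	}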