* Authors: Microsoft Linux virtualization team
*/
+#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
if (!is_mmio)
region->flags.range_pinned = true;
+ kref_init(&region->refcount);
+
return region;
}
mshv_region_chunk_unmap);
}
-void mshv_region_destroy(struct mshv_mem_region *region)
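+/*
+ * kref release callback: invoked from mshv_region_put() when the last
+ * reference to the region is dropped.
+ */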
+static void mshv_region_destroy(struct kref *ref)
{
+ struct mshv_mem_region *region =
+ container_of(ref, struct mshv_mem_region, refcount);
struct mshv_partition *partition = region->partition;
int ret;
- hlist_del(&region->hnode);
-
if (mshv_partition_encrypted(partition)) {
ret = mshv_region_share(region);
if (ret) {
vfree(region);
}
+
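+/* Drop a reference; the region is destroyed when the count reaches zero. */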
+void mshv_region_put(struct mshv_mem_region *region)
+{
+ kref_put(&region->refcount, mshv_region_destroy);
+}
+
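+/* Take a reference unless the count is already zero; returns nonzero on success. */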
+int mshv_region_get(struct mshv_mem_region *region)
+{
+ return kref_get_unless_zero(&region->refcount);
+}
struct mshv_mem_region {
struct hlist_node hnode;
+ struct kref refcount;
u64 nr_pages;
u64 start_gfn;
u64 start_uaddr;
u64 pt_id;
refcount_t pt_ref_count;
struct mutex pt_mutex;
+
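+ /* Protects the pt_mem_regions list below against concurrent add/remove/lookup. */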
+ spinlock_t pt_mem_regions_lock;
struct hlist_head pt_mem_regions; // not ordered
u32 pt_vp_count;
int mshv_region_map(struct mshv_mem_region *region);
void mshv_region_invalidate(struct mshv_mem_region *region);
int mshv_region_pin(struct mshv_mem_region *region);
-void mshv_region_destroy(struct mshv_mem_region *region);
+void mshv_region_put(struct mshv_mem_region *region);
+int mshv_region_get(struct mshv_mem_region *region);
#endif /* _MSHV_ROOT_H_ */
u64 nr_pages = HVPFN_DOWN(mem->size);
/* Reject overlapping regions */
+ spin_lock(&partition->pt_mem_regions_lock);
hlist_for_each_entry(rg, &partition->pt_mem_regions, hnode) {
if (mem->guest_pfn + nr_pages <= rg->start_gfn ||
rg->start_gfn + rg->nr_pages <= mem->guest_pfn)
continue;
-
+ spin_unlock(&partition->pt_mem_regions_lock);
return -EEXIST;
}
+ spin_unlock(&partition->pt_mem_regions_lock);
rg = mshv_region_create(mem->guest_pfn, nr_pages,
mem->userspace_addr, mem->flags,
if (ret)
goto errout;
- /* Install the new region */
+ spin_lock(&partition->pt_mem_regions_lock);
hlist_add_head(&region->hnode, &partition->pt_mem_regions);
+ spin_unlock(&partition->pt_mem_regions_lock);
return 0;
if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
return -EINVAL;
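+ /*
+ * Hold the list lock across lookup, validation and unlink so the
+ * region cannot be found again once it has been removed.
+ */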
+ spin_lock(&partition->pt_mem_regions_lock);
+
region = mshv_partition_region_by_gfn(partition, mem.guest_pfn);
- if (!region)
- return -EINVAL;
+ if (!region) {
+ spin_unlock(&partition->pt_mem_regions_lock);
+ return -ENOENT;
+ }
/* Paranoia check */
if (region->start_uaddr != mem.userspace_addr ||
region->start_gfn != mem.guest_pfn ||
- region->nr_pages != HVPFN_DOWN(mem.size))
+ region->nr_pages != HVPFN_DOWN(mem.size)) {
+ spin_unlock(&partition->pt_mem_regions_lock);
return -EINVAL;
+ }
+
+ hlist_del(&region->hnode);
- mshv_region_destroy(region);
+ spin_unlock(&partition->pt_mem_regions_lock);
+
+ mshv_region_put(region);
return 0;
}
remove_partition(partition);
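+ /* Unlink each remaining region and drop the partition's reference to it. */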
hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
- hnode)
- mshv_region_destroy(region);
+ hnode) {
+ hlist_del(&region->hnode);
+ mshv_region_put(region);
+ }
/* Withdraw and free all pages we deposited */
hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
INIT_HLIST_HEAD(&partition->pt_devices);
+ spin_lock_init(&partition->pt_mem_regions_lock);
INIT_HLIST_HEAD(&partition->pt_mem_regions);
mshv_eventfd_init(partition);