struct page *page;
int stride, ret;
- page = region->pages[page_offset];
+ page = region->mreg_pages[page_offset];
if (!page)
return -EINVAL;
/* Start at stride since the first stride is validated */
for (count = stride; count < page_count; count += stride) {
- page = region->pages[page_offset + count];
+ page = region->mreg_pages[page_offset + count];
/* Break if current page is not present */
if (!page)
break;
while (page_count) {
/* Skip non-present pages */
- if (!region->pages[page_offset]) {
+ if (!region->mreg_pages[page_offset]) {
page_offset++;
page_count--;
continue;
if (flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
- kref_init(&region->refcount);
+ kref_init(&region->mreg_refcount);
return region;
}
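/*
 * Grant the host read/write access to the run of pages; large-page
 * handling is requested via HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE.
 */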
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages + page_offset,
+ region->mreg_pages + page_offset,
page_count,
HV_MAP_GPA_READABLE |
HV_MAP_GPA_WRITABLE,
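/* The same hypercall with an empty access mask revokes host access. */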
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages + page_offset,
+ region->mreg_pages + page_offset,
page_count, 0,
flags, false);
}
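/* Map the run of pages into the partition's GPA space at the region offset. */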
return hv_call_map_gpa_pages(region->partition->pt_id,
region->start_gfn + page_offset,
page_count, flags,
- region->pages + page_offset);
+ region->mreg_pages + page_offset);
}
static int mshv_region_remap_pages(struct mshv_mem_region *region,
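/*
 * Drop the driver's references to a run of pages: unpin them when the
 * region is pinned, then clear the stale struct page pointers.
 */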
static void mshv_region_invalidate_pages(struct mshv_mem_region *region,
u64 page_offset, u64 page_count)
{
- if (region->type == MSHV_REGION_TYPE_MEM_PINNED)
- unpin_user_pages(region->pages + page_offset, page_count);
+ if (region->mreg_type == MSHV_REGION_TYPE_MEM_PINNED)
+ unpin_user_pages(region->mreg_pages + page_offset, page_count);
- memset(region->pages + page_offset, 0,
+ memset(region->mreg_pages + page_offset, 0,
page_count * sizeof(struct page *));
}
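/* Populate mreg_pages in batches from the region's userspace address range. */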
int ret;
for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
- pages = region->pages + done_count;
+ pages = region->mreg_pages + done_count;
userspace_addr = region->start_uaddr +
done_count * HV_HYP_PAGE_SIZE;
nr_pages = min(region->nr_pages - done_count,
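/* kref release callback: runs once the last reference to the region is dropped. */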
static void mshv_region_destroy(struct kref *ref)
{
struct mshv_mem_region *region =
- container_of(ref, struct mshv_mem_region, refcount);
+ container_of(ref, struct mshv_mem_region, mreg_refcount);
struct mshv_partition *partition = region->partition;
int ret;
- if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
+ if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
mshv_region_movable_fini(region);
if (mshv_partition_encrypted(partition)) {
void mshv_region_put(struct mshv_mem_region *region)
{
- kref_put(&region->refcount, mshv_region_destroy);
+ kref_put(&region->mreg_refcount, mshv_region_destroy);
}
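/* Take a reference unless the refcount has already dropped to zero. */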
int mshv_region_get(struct mshv_mem_region *region)
{
- return kref_get_unless_zero(&region->refcount);
+ return kref_get_unless_zero(&region->mreg_refcount);
}
/**
int ret;
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- mmap_read_lock(region->mni.mm);
+ mmap_read_lock(region->mreg_mni.mm);
ret = hmm_range_fault(range);
- mmap_read_unlock(region->mni.mm);
+ mmap_read_unlock(region->mreg_mni.mm);
if (ret)
return ret;
- mutex_lock(&region->mutex);
+ mutex_lock(&region->mreg_mutex);
if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
- mutex_unlock(&region->mutex);
+ mutex_unlock(&region->mreg_mutex);
cond_resched();
return -EBUSY;
}
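/* Fault in a run of pages and map them into the partition. */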
u64 page_offset, u64 page_count)
{
struct hmm_range range = {
- .notifier = &region->mni,
+ .notifier = &region->mreg_mni,
.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
};
unsigned long *pfns;
goto out;
for (i = 0; i < page_count; i++)
- region->pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
+ region->mreg_pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
ret = mshv_region_remap_pages(region, region->hv_map_flags,
page_offset, page_count);
- mutex_unlock(&region->mutex);
+ mutex_unlock(&region->mreg_mutex);
out:
kfree(pfns);
return ret;
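/*
 * mmu_interval_notifier invalidation callback: under mreg_mutex, advance
 * the interval sequence and drop the affected pages.
 */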
{
struct mshv_mem_region *region = container_of(mni,
struct mshv_mem_region,
- mni);
+ mreg_mni);
u64 page_offset, page_count;
unsigned long mstart, mend;
int ret = -EPERM;
page_count = HVPFN_DOWN(mend - mstart);
if (mmu_notifier_range_blockable(range))
- mutex_lock(&region->mutex);
- else if (!mutex_trylock(&region->mutex))
+ mutex_lock(&region->mreg_mutex);
+ else if (!mutex_trylock(&region->mreg_mutex))
goto out_fail;
mmu_interval_set_seq(mni, cur_seq);
mshv_region_invalidate_pages(region, page_offset, page_count);
- mutex_unlock(&region->mutex);
+ mutex_unlock(&region->mreg_mutex);
return true;
out_unlock:
- mutex_unlock(&region->mutex);
+ mutex_unlock(&region->mreg_mutex);
out_fail:
WARN_ONCE(ret,
"Failed to invalidate region %#llx-%#llx (range %#lx-%#lx, event: %u, pages %#llx-%#llx, mm: %#llx): %d\n",
void mshv_region_movable_fini(struct mshv_mem_region *region)
{
- mmu_interval_notifier_remove(&region->mni);
+ mmu_interval_notifier_remove(&region->mreg_mni);
}
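/*
 * Register an interval notifier over the region's user address range and
 * initialize the mutex that serializes faults against invalidations.
 */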
bool mshv_region_movable_init(struct mshv_mem_region *region)
{
int ret;
- ret = mmu_interval_notifier_insert(&region->mni, current->mm,
+ ret = mmu_interval_notifier_insert(&region->mreg_mni, current->mm,
region->start_uaddr,
region->nr_pages << HV_HYP_PAGE_SHIFT,
&mshv_region_mni_ops);
if (ret)
return false;
- mutex_init(&region->mutex);
+ mutex_init(&region->mreg_mutex);
return true;
}