{
	struct mshv_partition *p = vp->vp_partition;
	struct mshv_mem_region *region;
-	bool ret;
+	bool ret = false;
	u64 gfn;
#if defined(CONFIG_X86_64)
	struct hv_x64_memory_intercept_message *msg =
-		(struct hv_arm64_memory_intercept_message *)
+		/* Cast must match the declared x64 message type */
+		(struct hv_x64_memory_intercept_message *)
			vp->vp_intercept_msg_page->u.payload;
#endif
+	/*
+	 * NOTE(review): msg is only declared under CONFIG_X86_64 here; an
+	 * equivalent CONFIG_ARM64 branch must exist in the full file for
+	 * the uses below to build on arm64 -- confirm.
+	 */
+	enum hv_intercept_access_type access_type =
+		msg->header.intercept_access_type;
	gfn = HVPFN_DOWN(msg->guest_physical_address);
+	/*
+	 * NOTE(review): region must be looked up from gfn before the NULL
+	 * check below; the lookup line is not visible in this hunk --
+	 * confirm it is present in the unchanged context. The reference it
+	 * takes is dropped via mshv_region_put() on every exit path below.
+	 */
	if (!region)
		return false;
+	/* Deny accesses the GPA mapping flags do not permit */
+	if (access_type == HV_INTERCEPT_ACCESS_WRITE &&
+	    !(region->hv_map_flags & HV_MAP_GPA_WRITABLE))
+		goto put_region;
+
+	if (access_type == HV_INTERCEPT_ACCESS_EXECUTE &&
+	    !(region->hv_map_flags & HV_MAP_GPA_EXECUTABLE))
+		goto put_region;
+
	/* Only movable memory ranges are supported for GPA intercepts */
	if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
		ret = mshv_region_handle_gfn_fault(region, gfn);
-	else
-		ret = false;
+put_region:
	mshv_region_put(region);
	return ret;
u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
} __packed;
+/*
+ * Access type of an intercepted guest memory operation, as carried in the
+ * intercept_access_type field of the x64/arm64 intercept message headers.
+ */
+enum hv_intercept_access_type {
+ HV_INTERCEPT_ACCESS_READ = 0,
+ HV_INTERCEPT_ACCESS_WRITE = 1,
+ HV_INTERCEPT_ACCESS_EXECUTE = 2
+};
+
#endif /* _HV_HVGDK_MINI_H */
u32 vp_index;
u8 instruction_length:4;
u8 cr8:4; /* Only set for exo partitions */
- u8 intercept_access_type;
+ u8 intercept_access_type; /* enum hv_intercept_access_type */
union hv_x64_vp_execution_state execution_state;
struct hv_x64_segment_register cs_segment;
u64 rip;
struct hv_arm64_intercept_message_header {
u32 vp_index;
u8 instruction_length;
- u8 intercept_access_type;
+ u8 intercept_access_type; /* enum hv_intercept_access_type */
union hv_arm64_vp_execution_state execution_state;
u64 pc;
u64 cpsr;