Add the virt op and mailbox command needed for a VF to send RAS commands to the host.
Signed-off-by: YiPeng Chai <YiPeng.Chai@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
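For context, a VF-side caller is expected to place the RAS command payload in a guest buffer and pass its guest physical address (GPA) and length to the new interface. A minimal caller sketch (the example_* naming and dev_warn() handling are illustrative, not part of this patch):

	/* Hypothetical caller: cmd_gpa is the guest physical address of a
	 * buffer already filled with the RAS command payload.
	 */
	static int example_send_ras_cmd(struct amdgpu_device *adev,
					uint64_t cmd_gpa, uint32_t cmd_len)
	{
		int ret;

		ret = amdgpu_virt_send_remote_ras_cmd(adev, cmd_gpa, cmd_len);
		if (ret)
			dev_warn(adev->dev, "sending RAS cmd to host failed: %d\n", ret);
		return ret;
	}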
	return r;
}
+
+static int req_remote_ras_cmd(struct amdgpu_device *adev,
+			      u32 param1, u32 param2, u32 param3)
+{
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (virt->ops && virt->ops->req_remote_ras_cmd)
+		return virt->ops->req_remote_ras_cmd(adev, param1, param2, param3);
+	return -ENOENT;
+}
+
+int amdgpu_virt_send_remote_ras_cmd(struct amdgpu_device *adev,
+				    uint64_t buf, uint32_t buf_len)
+{
+	uint64_t gpa = buf;
+	int ret = -EIO;
+
+	if (down_read_trylock(&adev->reset_domain->sem)) {
+		ret = req_remote_ras_cmd(adev,
+					 lower_32_bits(gpa), upper_32_bits(gpa), buf_len);
+		up_read(&adev->reset_domain->sem);
+	}
+
+	return ret;
+}
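Note that the 64-bit GPA is split with the kernel's lower_32_bits()/upper_32_bits() helpers to fit the 32-bit mailbox parameters, and that down_read_trylock() on reset_domain->sem keeps the request from racing a GPU reset: if a reset holds the semaphore, the function returns -EIO without blocking. A hedged sketch of how a caller might treat that result (the retry policy is illustrative):

	ret = amdgpu_virt_send_remote_ras_cmd(adev, gpa, len);
	if (ret == -EIO) {
		/* A reset was in flight and the command was never sent;
		 * callers may retry once amdgpu_in_reset(adev) clears.
		 */
	}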
	int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
	int (*req_bad_pages)(struct amdgpu_device *adev);
	int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
+	int (*req_remote_ras_cmd)(struct amdgpu_device *adev,
+				  u32 param1, u32 param2, u32 param3);
};
/*
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
+int amdgpu_virt_send_remote_ras_cmd(struct amdgpu_device *adev,
+				    uint64_t buf, uint32_t buf_len);
#endif
	case IDH_REQ_RAS_CHK_CRITI:
		event = IDH_REQ_RAS_CHK_CRITI_READY;
		break;
+	case IDH_REQ_RAS_REMOTE_CMD:
+		event = IDH_REQ_RAS_REMOTE_CMD_READY;
+		break;
	default:
		break;
	}
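This switch pairs each request with the ack event the VF polls for. Roughly, the surrounding send path (abridged here; the real xgpu_nv_send_access_requests_with_param() carries more error handling and data-readback cases) does:

	/* Abridged flow, not a verbatim copy of the driver code. */
	enum idh_event event = -1;

	/* ... the switch above picks the expected ack for req ... */
	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
	if (event != -1)
		r = xgpu_nv_poll_msg(adev, event);	/* wait for host ack */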
		adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
}

+static int xgpu_nv_req_remote_ras_cmd(struct amdgpu_device *adev,
+				      u32 param1, u32 param2, u32 param3)
+{
+	return xgpu_nv_send_access_requests_with_param(
+		adev, IDH_REQ_RAS_REMOTE_CMD, param1, param2, param3);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
-	.req_ras_chk_criti = xgpu_nv_check_vf_critical_region
+	.req_ras_chk_criti = xgpu_nv_check_vf_critical_region,
+	.req_remote_ras_cmd = xgpu_nv_req_remote_ras_cmd,
};
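The new callback only becomes reachable once this ops table is installed on the VF, which happens during early init on NV-family parts; sketched below (simplified, the exact init path and ASIC dispatch are omitted):

	/* Simplified: SR-IOV early init installs the per-ASIC ops table. */
	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;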
	IDH_REQ_RAS_ERROR_COUNT = 203,
	IDH_REQ_RAS_CPER_DUMP = 204,
	IDH_REQ_RAS_BAD_PAGES = 205,
-	IDH_REQ_RAS_CHK_CRITI = 206
+	IDH_REQ_RAS_CHK_CRITI = 206,
+	IDH_REQ_RAS_REMOTE_CMD = 207,
};
enum idh_event {
	IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
	IDH_UNRECOV_ERR_NOTIFICATION = 17,
	IDH_REQ_RAS_CHK_CRITI_READY = 18,
+	IDH_REQ_RAS_REMOTE_CMD_READY = 19,
	IDH_TEXT_MESSAGE = 255,
};