 	return ret;
 }
 
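+/*
+ * Fill the user queue metadata for the SDMA IP: report the context save
+ * area (CSA) size and alignment via the per-ASIC sdma.get_csa_info
+ * callback, or return -EOPNOTSUPP when no callback is provided.
+ */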
+static int amdgpu_userq_metadata_info_sdma(struct amdgpu_device *adev,
+					   struct drm_amdgpu_info *info,
+					   struct drm_amdgpu_info_uq_metadata_sdma *meta)
+{
+	int ret = -EOPNOTSUPP;
+
+	if (adev->sdma.get_csa_info) {
+		struct amdgpu_sdma_csa_info csa = {};
+
+		adev->sdma.get_csa_info(adev, &csa);
+		meta->csa_size = csa.size;
+		meta->csa_alignment = csa.alignment;
+		ret = 0;
+	}
+
+	return ret;
+}
+
 static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			     struct drm_amdgpu_info *info,
 			     struct drm_amdgpu_info_hw_ip *result)
 			if (ret)
 				return ret;
+			ret = copy_to_user(out, &meta_info,
+					   min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+			return 0;
+		case AMDGPU_HW_IP_DMA:
+			ret = amdgpu_userq_metadata_info_sdma(adev, info, &meta_info.sdma);
+			if (ret)
+				return ret;
+
 			ret = copy_to_user(out, &meta_info,
 					   min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
 			return 0;
 	__u32 eop_alignment;
 };
 
+struct drm_amdgpu_info_uq_metadata_sdma {
+	/* context save area size for sdma6 */
+	__u32 csa_size;
+	/* context save area base virtual alignment for sdma6 */
+	__u32 csa_alignment;
+};
+
 struct drm_amdgpu_info_uq_metadata {
 	union {
 		struct drm_amdgpu_info_uq_metadata_gfx gfx;
 		struct drm_amdgpu_info_uq_metadata_compute compute;
+		struct drm_amdgpu_info_uq_metadata_sdma sdma;
 	};
 };
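
For reference, below is a minimal userspace sketch (not part of the patch) of how the new SDMA metadata could be queried through the amdgpu INFO ioctl, mirroring the existing gfx/compute paths. The query id AMDGPU_INFO_UQ_FW_AREAS is assumed to be the one that reaches the switch on info->query_hw_ip.type touched above; check the matching amdgpu_drm.h before relying on it.

/*
 * Illustrative only: query the SDMA user queue metadata added above.
 * AMDGPU_INFO_UQ_FW_AREAS is assumed to route into the query_hw_ip.type
 * switch shown in this patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

static int query_sdma_uq_metadata(int fd)
{
	struct drm_amdgpu_info request;
	struct drm_amdgpu_info_uq_metadata meta;

	memset(&request, 0, sizeof(request));
	memset(&meta, 0, sizeof(meta));

	request.return_pointer = (uintptr_t)&meta;
	request.return_size = sizeof(meta);
	request.query = AMDGPU_INFO_UQ_FW_AREAS;	/* assumed query id */
	request.query_hw_ip.type = AMDGPU_HW_IP_DMA;	/* hits the new case */

	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)))
		return -1;

	printf("SDMA CSA size %u, alignment %u\n",
	       meta.sdma.csa_size, meta.sdma.csa_alignment);
	return 0;
}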