} else {
tmp = adev->gmc.mem_partitions[mem_id].size;
}
- do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+
+ if (adev->xcp_mgr->mem_alloc_mode == AMDGPU_PARTITION_MEM_CAPPING_EVEN)
+ do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+
return ALIGN_DOWN(tmp, PAGE_SIZE);
} else if (adev->apu_prefer_gtt) {
return (ttm_tt_pages_limit() << PAGE_SHIFT);
return count;
}
+static ssize_t compute_partition_mem_alloc_mode_show(struct device *dev,
+						     struct device_attribute *attr,
+						     char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);	/* sysfs dev -> DRM device */
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	int mode = adev->xcp_mgr->mem_alloc_mode;	/* NOTE(review): assumes xcp_mgr != NULL — confirm the attr is only created on partition-capable ASICs */
+
+	return sysfs_emit(buf, "%s\n",
+			  amdgpu_gfx_compute_mem_alloc_mode_desc(mode));
+}
+
+
+static ssize_t compute_partition_mem_alloc_mode_store(struct device *dev,
+						      struct device_attribute *attr,
+						      const char *buf, size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);	/* sysfs dev -> DRM device */
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	/* Prefix match so the trailing '\n' from "echo CAPPING > ..." is accepted.
+	 * NOTE(review): write is not serialized against concurrent readers of
+	 * mem_alloc_mode — confirm a plain int store is acceptable here. */
+	if (!strncasecmp("CAPPING", buf, strlen("CAPPING")))
+		adev->xcp_mgr->mem_alloc_mode = AMDGPU_PARTITION_MEM_CAPPING_EVEN;
+	else if (!strncasecmp("ALL", buf, strlen("ALL")))
+		adev->xcp_mgr->mem_alloc_mode = AMDGPU_PARTITION_MEM_ALLOC_ALL;
+	else
+		return -EINVAL;	/* unrecognized mode string */
+
+	return count;	/* consume the whole write on success */
+}
+
static const char *xcp_desc[] = {
[AMDGPU_SPX_PARTITION_MODE] = "SPX",
[AMDGPU_DPX_PARTITION_MODE] = "DPX",
static DEVICE_ATTR(compute_reset_mask, 0444,
amdgpu_gfx_get_compute_reset_mask, NULL);
+static DEVICE_ATTR(compute_partition_mem_alloc_mode, 0644,	/* rw for root, read-only for others */
+		   compute_partition_mem_alloc_mode_show,
+		   compute_partition_mem_alloc_mode_store);
+
static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
{
struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
if (r)
return r;
+ r = device_create_file(adev->dev,
+ &dev_attr_compute_partition_mem_alloc_mode);
+ if (r)
+ return r;
+
if (xcp_switch_supported)
r = device_create_file(adev->dev,
&dev_attr_available_compute_partition);
AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
};
+enum amdgpu_gfx_partition_mem_alloc_mode {
+	AMDGPU_PARTITION_MEM_CAPPING_EVEN = 0,	/* default: cap each XCP to an even 1/num_xcp_per_mem_partition share */
+	AMDGPU_PARTITION_MEM_ALLOC_ALL = 1,	/* an XCP may allocate from its entire memory partition */
+};
+
#define NUM_XCC(x) hweight16(x)
enum amdgpu_gfx_ras_mem_id_type {
}
}
+static inline const char *amdgpu_gfx_compute_mem_alloc_mode_desc(int mode)
+{	/* Translate a mem_alloc_mode enum value to the string shown via sysfs. */
+	switch (mode) {
+	case AMDGPU_PARTITION_MEM_CAPPING_EVEN:
+		return "CAPPING";	/* even per-XCP capping */
+	case AMDGPU_PARTITION_MEM_ALLOC_ALL:
+		return "ALL";	/* whole-partition allocation allowed */
+	default:
+		return "UNKNOWN";	/* out-of-range mode value */
+	}
+}
+
#endif