struct amdgpu_device *adev;
};
+#define MAX_UMA_OPTION_NAME 28
+#define MAX_UMA_OPTION_ENTRIES 19
+
+#define AMDGPU_UMA_FLAG_CUSTOM BIT(0)
+#define AMDGPU_UMA_FLAG_AUTO BIT(1)
+
+/**
+ * struct amdgpu_uma_carveout_option - single UMA carveout option
+ * @name: Name of the carveout option
+ * @memory_carved_mb: Amount of memory carved in MB
+ * @flags: ATCS flags supported by this option
+ */
+struct amdgpu_uma_carveout_option {
+ char name[MAX_UMA_OPTION_NAME];
+ uint32_t memory_carved_mb;
+ uint8_t flags;
+};
+
+/**
+ * struct amdgpu_uma_carveout_info - table of available UMA carveout options
+ * @num_entries: Number of available options
+ * @uma_option_index: The index of the option currently applied
+ * @update_lock: Lock to serialize changes to the option
+ * @entries: The array of carveout options
+ */
+struct amdgpu_uma_carveout_info {
+ uint8_t num_entries;
+ uint8_t uma_option_index;
+ struct mutex update_lock;
+ struct amdgpu_uma_carveout_option entries[MAX_UMA_OPTION_ENTRIES];
+};
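For reference, a minimal sketch (not part of this patch) of how a consumer might read the size of the currently applied option from this table; the helper name is hypothetical, and the locking simply mirrors the update_lock documented above:

/*
 * Illustrative only: look up the currently applied carveout option.
 * Assumes the table has been populated and that update_lock serializes
 * readers against changes to uma_option_index.
 */
static u32 example_uma_active_carveout_mb(struct amdgpu_uma_carveout_info *uma_info)
{
	u32 size_mb = 0;

	mutex_lock(&uma_info->update_lock);
	if (uma_info->uma_option_index < uma_info->num_entries)
		size_mb = uma_info->entries[uma_info->uma_option_index].memory_carved_mb;
	mutex_unlock(&uma_info->update_lock);

	return size_mb;
}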
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
struct work_struct userq_reset_work;
struct amdgpu_uid *uid_info;
+ struct amdgpu_uma_carveout_info uma_info;
+
/* KFD
* Must be last --ends in a flexible-array member.
*/
return vram_type;
}
+
+static int amdgpu_atomfirmware_get_uma_carveout_info_v2_3(struct amdgpu_device *adev,
+ union igp_info *igp_info,
+ struct amdgpu_uma_carveout_info *uma_info)
+{
+ struct uma_carveout_option *opts;
+ uint8_t nr_uma_options;
+ int i;
+
+ nr_uma_options = igp_info->v23.UMACarveoutIndexMax;
+
+ if (!nr_uma_options)
+ return -ENODEV;
+
+ if (nr_uma_options > MAX_UMA_OPTION_ENTRIES) {
+ drm_dbg(adev_to_drm(adev),
+ "Number of UMA options exceeds max table size. Options will not be parsed\n");
+ return -EINVAL;
+ }
+
+ uma_info->num_entries = nr_uma_options;
+ uma_info->uma_option_index = igp_info->v23.UMACarveoutIndex;
+
+ opts = igp_info->v23.UMASizeControlOption;
+
+ for (i = 0; i < nr_uma_options; i++) {
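+ /* A value of 0 GB appears to denote half a gigabyte (512 MB);
+  * otherwise convert the reported GB count to MB.
+  */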
+ if (!opts[i].memoryCarvedGb)
+ uma_info->entries[i].memory_carved_mb = 512;
+ else
+ uma_info->entries[i].memory_carved_mb = (uint32_t)opts[i].memoryCarvedGb << 10;
+
+ uma_info->entries[i].flags = opts[i].uma_carveout_option_flags.all8;
+ strscpy(uma_info->entries[i].name, opts[i].optionName, MAX_UMA_OPTION_NAME);
+ }
+
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
+ struct amdgpu_uma_carveout_info *uma_info)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ union igp_info *igp_info;
+ u16 data_offset, size;
+ u8 frev, crev;
+ int index;
+
+ if (!(adev->flags & AMD_IS_APU))
+ return -ENODEV;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ return -EINVAL;
+ }
+
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 2:
+ switch (crev) {
+ case 3:
+ return amdgpu_atomfirmware_get_uma_carveout_info_v2_3(adev, igp_info, uma_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -ENODEV;
+}
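As a usage sketch (not part of this patch), the parser would typically be called once while the ATOM firmware tables are read during init. The call site, the int r in scope, and the adev->powerplay.uma_info embedding are assumptions based on the struct amd_powerplay hunk above:

	/*
	 * Hypothetical call site: populate the carveout table at init.
	 * -ENODEV simply means the platform exposes no UMA options.
	 */
	mutex_init(&adev->powerplay.uma_info.update_lock);
	r = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->powerplay.uma_info);
	if (r && r != -ENODEV)
		drm_dbg(adev_to_drm(adev), "failed to parse UMA carveout info (%d)\n", r);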
+
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type,
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
+ struct amdgpu_uma_carveout_info *uma_info);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);