*
*
*/
+#include "amdgpu_reg_access.h"
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
* is changed. In such case, replace the aqua_vanjaram implementation
* with more common helper */
reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
- aqua_vanjaram_encode_ext_smn_addressing(instance);
+ amdgpu_reg_get_smn_base64(adev, MP0_HWIP, instance);
fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
- aqua_vanjaram_encode_ext_smn_addressing(instance);
+ amdgpu_reg_get_smn_base64(adev, MP0_HWIP, instance);
boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
int retry_loop;
reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
- aqua_vanjaram_encode_ext_smn_addressing(instance);
+ amdgpu_reg_get_smn_base64(adev, MP0_HWIP, instance);
for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
adev->reg.pcie.port_wreg(adev, reg, v);
}
+static int amdgpu_reg_get_smn_base_version(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev = adev->pdev;
+ int id;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
+ id = (pdev->device >> 4) & 0xFFFF;
+ if (id == 0x74A || id == 0x74B || id == 0x75A || id == 0x75B)
+ return 1;
+
+ return -EOPNOTSUPP;
+}
+
/**
 * amdgpu_reg_get_smn_base64 - 64-bit SMN base address for a HW IP instance
 * @adev: amdgpu device handle
 * @block: HW IP block being addressed
 * @die_inst: target die (AID) instance
 *
 * Prefers the per-ASIC callback when one is installed; otherwise falls back
 * to a helper selected by PCI device ID.
 *
 * Returns: the SMN base offset to add to a register address, or 0 when no
 * helper supports this device (an error is logged once in that case).
 */
uint64_t amdgpu_reg_get_smn_base64(struct amdgpu_device *adev,
				   enum amd_hw_ip_block_type block,
				   int die_inst)
{
	if (!adev->reg.smn.get_smn_base) {
		/* No callback installed: pick a generic helper by device ID. */
		switch (amdgpu_reg_get_smn_base_version(adev)) {
		case 1:
			return amdgpu_reg_smn_v1_0_get_base(adev, block,
							    die_inst);
		default:
			dev_err_once(
				adev->dev,
				"SMN base address query not supported for this device\n");
			return 0;
		}
	}
	return adev->reg.smn.get_smn_base(adev, block, die_inst);
}
+uint64_t amdgpu_reg_smn_v1_0_get_base(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int die_inst)
+{
+ uint64_t smn_base;
+
+ if (die_inst == 0)
+ return 0;
+
+ switch (block) {
+ case XGMI_HWIP:
+ case NBIO_HWIP:
+ case MP0_HWIP:
+ case UMC_HWIP:
+ case DF_HWIP:
+ smn_base = ((uint64_t)(die_inst & 0x3) << 32) | (1ULL << 34);
+ break;
+ default:
+ dev_warn_once(
+ adev->dev,
+ "SMN base address query not supported for this block %d\n",
+ block);
+ smn_base = 0;
+ break;
+ }
+
+ return smn_base;
+}
+
/*
* register access helper functions.
*/
uint64_t amdgpu_reg_get_smn_base64(struct amdgpu_device *adev,
enum amd_hw_ip_block_type block,
int die_inst);
+uint64_t amdgpu_reg_smn_v1_0_get_base(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int die_inst);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
if (!(adev->aid_mask & BIT(i)))
return U32_MAX;
- addr += adev->asic_funcs->encode_ext_smn_addressing(i);
+ addr += amdgpu_reg_get_smn_base64(adev, XGMI_HWIP, i);
return RREG32_PCIE_EXT(addr);
}
/* Clear the MCA status register of one XGMI instance on its owning AID. */
static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
	WREG64_MCA(amdgpu_reg_get_smn_base64(adev, XGMI_HWIP, xgmi_inst),
		   mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
u64 mca_base, struct ras_err_data *err_data)
{
int xgmi_inst = mcm_info->die_id;
+ uint64_t smn_base;
u64 status = 0;
status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
default:
break;
}
-
- WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
+ smn_base = amdgpu_reg_get_smn_base64(adev, XGMI_HWIP, xgmi_inst);
+ WREG64_MCA(smn_base, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-/* Fixed pattern for smn addressing on different AIDs:
- * bit[34]: indicate cross AID access
- * bit[33:32]: indicate target AID id
- * AID id range is 0 ~ 3 as maximum AID number is 4.
- */
-u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
-{
- u64 ext_offset;
-
- /* local routing and bit[34:32] will be zeros */
- if (ext_id == 0)
- return 0;
-
- /* Initiated from host, accessing to all non-zero aids are cross traffic */
- ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
-
- return ext_offset;
-}
-
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
uint64_t smn_addr, int i)
{
regdata->addr =
- smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
+ smn_addr + amdgpu_reg_get_smn_base64(adev, XGMI_HWIP, i);
regdata->value = RREG32_PCIE_EXT(regdata->addr);
}
bl_status_reg =
(SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
<< 2) +
- adev->asic_funcs->encode_ext_smn_addressing(i);
+ amdgpu_reg_get_smn_base64(adev, MP0_HWIP, i);
at += snprintf(bl_status_msg + at,
PSP13_BL_STATUS_SIZE - at,
" status(%02i): 0x%08x", i,
.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.query_video_codecs = &soc15_query_video_codecs,
- .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
.get_reg_state = &aqua_vanjaram_get_reg_state,
};
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
adev->asic_funcs = &aqua_vanjaram_asic_funcs;
+ adev->reg.smn.get_smn_base = &amdgpu_reg_smn_v1_0_get_base;
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
enum amdgpu_reg_state reg_state, void *buf,
__RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP, inst)
/* inst equals to ext for some IPs */
#define RREG32_SOC15_EXT(ip, inst, reg, ext) \
	RREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + \
			 reg) * 4 + \
			amdgpu_reg_get_smn_base64(adev, ip##_HWIP, ext))

#define WREG32_SOC15_EXT(ip, inst, reg, ext, value) \
	WREG32_PCIE_EXT( \
		(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * \
			4 + \
			amdgpu_reg_get_smn_base64(adev, ip##_HWIP, ext), \
		value)

/* Callers resolve smn_base via amdgpu_reg_get_smn_base64() up front. */
#define RREG64_MCA(smn_base, mca_base, idx) \
	RREG64_PCIE_EXT((smn_base) + (mca_base) + ((idx) * 8))

#define WREG64_MCA(smn_base, mca_base, idx, val) \
	WREG64_PCIE_EXT((smn_base) + (mca_base) + ((idx) * 8), val)
#endif