Move DIDT callbacks to the register access block.
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
amdgpu_wreg64_t pcie_wreg64;
amdgpu_rreg64_ext_t pcie_rreg64_ext;
amdgpu_wreg64_ext_t pcie_wreg64_ext;
- /* protects concurrent DIDT register access */
- spinlock_t didt_idx_lock;
- amdgpu_rreg_t didt_rreg;
- amdgpu_wreg_t didt_wreg;
/* protects concurrent gc_cac register access */
spinlock_t gc_cac_idx_lock;
amdgpu_rreg_t gc_cac_rreg;
#define WREG32_SMC(reg, v) amdgpu_reg_smc_wr32(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) amdgpu_reg_uvd_ctx_rd32(adev, (reg))
#define WREG32_UVD_CTX(reg, v) amdgpu_reg_uvd_ctx_wr32(adev, (reg), (v))
-#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
-#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_DIDT(reg) amdgpu_reg_didt_rd32(adev, (reg))
+#define WREG32_DIDT(reg, v) amdgpu_reg_didt_wr32(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- if (!adev->didt_rreg)
+ if (!adev->reg.didt.rreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- if (!adev->didt_wreg)
+ if (!adev->reg.didt.wreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
- adev->didt_rreg = &amdgpu_invalid_rreg;
- adev->didt_wreg = &amdgpu_invalid_wreg;
adev->gc_cac_rreg = &amdgpu_invalid_rreg;
adev->gc_cac_wreg = &amdgpu_invalid_wreg;
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->pcie_idx_lock);
- spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->reg.uvd_ctx.lock);
adev->reg.uvd_ctx.rreg = NULL;
adev->reg.uvd_ctx.wreg = NULL;
+
+ spin_lock_init(&adev->reg.didt.lock);
+ adev->reg.didt.rreg = NULL;
+ adev->reg.didt.wreg = NULL;
}
uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg)
adev->reg.uvd_ctx.wreg(adev, reg, v);
}
+uint32_t amdgpu_reg_didt_rd32(struct amdgpu_device *adev, uint32_t reg)
+{
+ if (!adev->reg.didt.rreg) {
+ dev_err_once(adev->dev, "DIDT register read not supported\n");
+ return 0;
+ }
+ return adev->reg.didt.rreg(adev, reg);
+}
+
+void amdgpu_reg_didt_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ if (!adev->reg.didt.wreg) {
+ dev_err_once(adev->dev, "DIDT register write not supported\n");
+ return;
+ }
+ adev->reg.didt.wreg(adev, reg, v);
+}
+
/*
* register access helper functions.
*/
struct amdgpu_reg_access {
struct amdgpu_reg_ind smc;
struct amdgpu_reg_ind uvd_ctx;
+ struct amdgpu_reg_ind didt;
};
void amdgpu_reg_access_init(struct amdgpu_device *adev);
void amdgpu_reg_smc_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
uint32_t amdgpu_reg_uvd_ctx_rd32(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_reg_uvd_ctx_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+uint32_t amdgpu_reg_didt_rd32(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_reg_didt_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device *, uint64_t, uint32_t);
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
r = RREG32(mmDIDT_IND_DATA);
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
WREG32(mmDIDT_IND_DATA, (v));
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
}
static const u32 bonaire_golden_spm_registers[] =
adev->pcie_wreg = &cik_pcie_wreg;
adev->reg.uvd_ctx.rreg = &cik_uvd_ctx_rreg;
adev->reg.uvd_ctx.wreg = &cik_uvd_ctx_wreg;
- adev->didt_rreg = &cik_didt_rreg;
- adev->didt_wreg = &cik_didt_wreg;
+ adev->reg.didt.rreg = &cik_didt_rreg;
+ adev->reg.didt.wreg = &cik_didt_wreg;
adev->asic_funcs = &cik_asic_funcs;
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
r = RREG32(data);
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
return r;
}
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
WREG32(data, (v));
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
}
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- adev->didt_rreg = &nv_didt_rreg;
- adev->didt_wreg = &nv_didt_wreg;
+ adev->reg.didt.rreg = &nv_didt_rreg;
+ adev->reg.didt.wreg = &nv_didt_wreg;
adev->asic_funcs = &nv_asic_funcs;
adev->pciep_wreg = &si_pciep_wreg;
adev->reg.uvd_ctx.rreg = &si_uvd_ctx_rreg;
adev->reg.uvd_ctx.wreg = &si_uvd_ctx_wreg;
- adev->didt_rreg = NULL;
- adev->didt_wreg = NULL;
adev->asic_funcs = &si_asic_funcs;
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
r = RREG32(data);
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
return r;
}
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
WREG32(data, (v));
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
}
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
adev->reg.uvd_ctx.rreg = &soc15_uvd_ctx_rreg;
adev->reg.uvd_ctx.wreg = &soc15_uvd_ctx_wreg;
- adev->didt_rreg = &soc15_didt_rreg;
- adev->didt_wreg = &soc15_didt_wreg;
+ adev->reg.didt.rreg = &soc15_didt_rreg;
+ adev->reg.didt.wreg = &soc15_didt_wreg;
adev->gc_cac_rreg = &soc15_gc_cac_rreg;
adev->gc_cac_wreg = &soc15_gc_cac_wreg;
adev->se_cac_rreg = &soc15_se_cac_rreg;
address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
r = RREG32(data);
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
return r;
}
address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(address, (reg));
WREG32(data, (v));
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
}
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- adev->didt_rreg = &soc21_didt_rreg;
- adev->didt_wreg = &soc21_didt_wreg;
+ adev->reg.didt.rreg = &soc21_didt_rreg;
+ adev->reg.didt.wreg = &soc21_didt_wreg;
adev->asic_funcs = &soc21_asic_funcs;
adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- adev->didt_rreg = NULL;
- adev->didt_wreg = NULL;
adev->asic_funcs = &soc24_asic_funcs;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
- adev->didt_rreg = NULL;
- adev->didt_wreg = NULL;
adev->asic_funcs = &soc_v1_0_asic_funcs;
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
r = RREG32(mmDIDT_IND_DATA);
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.didt.lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
WREG32(mmDIDT_IND_DATA, (v));
- spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.didt.lock, flags);
}
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
adev->pcie_wreg = &vi_pcie_wreg;
adev->reg.uvd_ctx.rreg = &vi_uvd_ctx_rreg;
adev->reg.uvd_ctx.wreg = &vi_uvd_ctx_wreg;
- adev->didt_rreg = &vi_didt_rreg;
- adev->didt_wreg = &vi_didt_wreg;
+ adev->reg.didt.rreg = &vi_didt_rreg;
+ adev->reg.didt.wreg = &vi_didt_wreg;
adev->gc_cac_rreg = &vi_gc_cac_rreg;
adev->gc_cac_wreg = &vi_gc_cac_wreg;