amdgpu_rreg64_t pcie_rreg64;
amdgpu_wreg64_t pcie_wreg64;
amdgpu_rreg64_ext_t pcie_rreg64_ext;
- amdgpu_wreg64_ext_t pcie_wreg64_ext;
- /* protects concurrent UVD register access */
- spinlock_t uvd_ctx_idx_lock;
- amdgpu_rreg_t uvd_ctx_rreg;
- amdgpu_wreg_t uvd_ctx_wreg;
+ amdgpu_wreg64_ext_t pcie_wreg64_ext;
/* protects concurrent DIDT register access */
spinlock_t didt_idx_lock;
amdgpu_rreg_t didt_rreg;
#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
#define RREG32_SMC(reg) amdgpu_reg_smc_rd32(adev, (reg))
#define WREG32_SMC(reg, v) amdgpu_reg_smc_wr32(adev, (reg), (v))
-#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
-#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
+#define RREG32_UVD_CTX(reg) amdgpu_reg_uvd_ctx_rd32(adev, (reg))
+#define WREG32_UVD_CTX(reg, v) amdgpu_reg_uvd_ctx_wr32(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
- adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
- adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
adev->didt_wreg = &amdgpu_invalid_wreg;
adev->gc_cac_rreg = &amdgpu_invalid_rreg;
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->pcie_idx_lock);
- spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->reg.smc.lock);
adev->reg.smc.rreg = NULL;
adev->reg.smc.wreg = NULL;
+
+ spin_lock_init(&adev->reg.uvd_ctx.lock);
+ adev->reg.uvd_ctx.rreg = NULL;
+ adev->reg.uvd_ctx.wreg = NULL;
}
uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg)
adev->reg.smc.wreg(adev, reg, v);
}
+/**
+ * amdgpu_reg_uvd_ctx_rd32 - read an indirect UVD context register
+ * @adev: amdgpu device pointer
+ * @reg: UVD_CTX register offset to read
+ *
+ * Goes through the per-ASIC callback installed in adev->reg.uvd_ctx.rreg.
+ * ASICs without UVD_CTX support leave the callback NULL (see
+ * amdgpu_reg_access_init()); in that case a one-time error is logged and
+ * 0 is returned instead of dereferencing a NULL pointer.
+ *
+ * Return: the register value, or 0 when UVD_CTX access is unsupported.
+ */
+uint32_t amdgpu_reg_uvd_ctx_rd32(struct amdgpu_device *adev, uint32_t reg)
+{
+ if (!adev->reg.uvd_ctx.rreg) {
+ dev_err_once(adev->dev,
+ "UVD_CTX register read not supported\n");
+ return 0;
+ }
+ return adev->reg.uvd_ctx.rreg(adev, reg);
+}
+
+/**
+ * amdgpu_reg_uvd_ctx_wr32 - write an indirect UVD context register
+ * @adev: amdgpu device pointer
+ * @reg: UVD_CTX register offset to write
+ * @v: value to write
+ *
+ * Counterpart of amdgpu_reg_uvd_ctx_rd32(); silently drops the write
+ * (after a one-time error message) when the ASIC installed no
+ * adev->reg.uvd_ctx.wreg callback.
+ */
+void amdgpu_reg_uvd_ctx_wr32(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t v)
+{
+ if (!adev->reg.uvd_ctx.wreg) {
+ dev_err_once(adev->dev,
+ "UVD_CTX register write not supported\n");
+ return;
+ }
+ adev->reg.uvd_ctx.wreg(adev, reg, v);
+}
+
+
/*
* register access helper functions.
*/
struct amdgpu_reg_access {
struct amdgpu_reg_ind smc;
+ /* indirect UVD context register access (lock + rreg/wreg callbacks);
+ * callbacks stay NULL on ASICs without UVD_CTX support — TODO confirm
+ * amdgpu_reg_uvd_ctx_rd32/wr32 are the only consumers */
+ struct amdgpu_reg_ind uvd_ctx;
};
void amdgpu_reg_access_init(struct amdgpu_device *adev);
uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_reg_smc_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+uint32_t amdgpu_reg_uvd_ctx_rd32(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_reg_uvd_ctx_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device *, uint64_t, uint32_t);
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(mmUVD_CTX_DATA);
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(mmUVD_CTX_DATA, (v));
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
}
static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
adev->reg.smc.wreg = cik_smc_wreg;
adev->pcie_rreg = &cik_pcie_rreg;
adev->pcie_wreg = &cik_pcie_wreg;
- adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
- adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
+ adev->reg.uvd_ctx.rreg = &cik_uvd_ctx_rreg;
+ adev->reg.uvd_ctx.wreg = &cik_uvd_ctx_wreg;
adev->didt_rreg = &cik_didt_rreg;
adev->didt_wreg = &cik_didt_wreg;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- /* TODO: will add them during VCN v2 implementation */
- adev->uvd_ctx_rreg = NULL;
- adev->uvd_ctx_wreg = NULL;
-
adev->didt_rreg = &nv_didt_rreg;
adev->didt_wreg = &nv_didt_wreg;
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(mmUVD_CTX_DATA);
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(mmUVD_CTX_DATA, (v));
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
adev->pcie_wreg = &si_pcie_wreg;
adev->pciep_rreg = &si_pciep_rreg;
adev->pciep_wreg = &si_pciep_wreg;
- adev->uvd_ctx_rreg = si_uvd_ctx_rreg;
- adev->uvd_ctx_wreg = si_uvd_ctx_wreg;
+ adev->reg.uvd_ctx.rreg = &si_uvd_ctx_rreg;
+ adev->reg.uvd_ctx.wreg = &si_uvd_ctx_wreg;
adev->didt_rreg = NULL;
adev->didt_wreg = NULL;
address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(address, ((reg) & 0x1ff));
r = RREG32(data);
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
return r;
}
address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(address, ((reg) & 0x1ff));
WREG32(data, (v));
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
- adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
- adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
+ adev->reg.uvd_ctx.rreg = &soc15_uvd_ctx_rreg;
+ adev->reg.uvd_ctx.wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
adev->didt_wreg = &soc15_didt_wreg;
adev->gc_cac_rreg = &soc15_gc_cac_rreg;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- /* TODO: will add them during VCN v2 implementation */
- adev->uvd_ctx_rreg = NULL;
- adev->uvd_ctx_wreg = NULL;
-
adev->didt_rreg = &soc21_didt_rreg;
adev->didt_wreg = &soc21_didt_wreg;
adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
- adev->uvd_ctx_rreg = NULL;
- adev->uvd_ctx_wreg = NULL;
adev->didt_rreg = NULL;
adev->didt_wreg = NULL;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
- adev->uvd_ctx_rreg = NULL;
- adev->uvd_ctx_wreg = NULL;
adev->didt_rreg = NULL;
adev->didt_wreg = NULL;
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(mmUVD_CTX_DATA);
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.uvd_ctx.lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(mmUVD_CTX_DATA, (v));
- spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.uvd_ctx.lock, flags);
}
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
}
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
- adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
- adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
+ adev->reg.uvd_ctx.rreg = &vi_uvd_ctx_rreg;
+ adev->reg.uvd_ctx.wreg = &vi_uvd_ctx_wreg;
adev->didt_rreg = &vi_didt_rreg;
adev->didt_wreg = &vi_didt_wreg;
adev->gc_cac_rreg = &vi_gc_cac_rreg;