/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
struct amdgpu_mmio_remap rmmio_remap;
- /* protects concurrent SMC based register access */
- spinlock_t smc_idx_lock;
- amdgpu_rreg_t smc_rreg;
- amdgpu_wreg_t smc_wreg;
+ /* indirect register access blocks (per-block lock + read/write callbacks) */
+ struct amdgpu_reg_access reg;
/* protects concurrent PCIE register access */
spinlock_t pcie_idx_lock;
amdgpu_rreg_t pcie_rreg;
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
-#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
-#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
+#define RREG32_SMC(reg) amdgpu_reg_smc_rd32(adev, (reg))
+#define WREG32_SMC(reg, v) amdgpu_reg_smc_wr32(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
ssize_t result = 0;
int r;
- if (!adev->smc_rreg)
+ if (!adev->reg.smc.rreg)
return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3)
ssize_t result = 0;
int r;
- if (!adev->smc_wreg)
+ if (!adev->reg.smc.wreg)
return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3)
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
- adev->smc_rreg = &amdgpu_invalid_rreg;
- adev->smc_wreg = &amdgpu_invalid_wreg;
+ amdgpu_reg_access_init(adev);
+
adev->pcie_rreg = &amdgpu_invalid_rreg;
adev->pcie_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
return r;
spin_lock_init(&adev->mmio_idx_lock);
- spin_lock_init(&adev->smc_idx_lock);
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
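+
+/**
+ * amdgpu_reg_access_init - set up indirect register access state
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the per-block lock and clears the read/write callbacks.
+ * ASIC early-init code installs the callbacks it actually supports.
+ */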
+void amdgpu_reg_access_init(struct amdgpu_device *adev)
+{
+ spin_lock_init(&adev->reg.smc.lock);
+ adev->reg.smc.rreg = NULL;
+ adev->reg.smc.wreg = NULL;
+}
+
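+/**
+ * amdgpu_reg_smc_rd32 - read a 32-bit SMC indirect register
+ * @adev: amdgpu_device pointer
+ * @reg: SMC register offset
+ *
+ * Returns the register value, or 0 (with a one-time error message) when
+ * the ASIC does not implement SMC register access.
+ */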
+uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg)
+{
+ if (!adev->reg.smc.rreg) {
+ dev_err_once(adev->dev, "SMC register read not supported\n");
+ return 0;
+ }
+ return adev->reg.smc.rreg(adev, reg);
+}
+
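+/**
+ * amdgpu_reg_smc_wr32 - write a 32-bit SMC indirect register
+ * @adev: amdgpu_device pointer
+ * @reg: SMC register offset
+ * @v: value to write
+ *
+ * The write is dropped (after a one-time error message) when the ASIC
+ * does not implement SMC register access.
+ */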
+void amdgpu_reg_smc_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ if (!adev->reg.smc.wreg) {
+ dev_err_once(adev->dev, "SMC register write not supported\n");
+ return;
+ }
+ adev->reg.smc.wreg(adev, reg, v);
+}
+
/*
* register access helper functions.
*/
#define __AMDGPU_REG_ACCESS_H__
#include <linux/types.h>
+#include <linux/spinlock.h>
struct amdgpu_device;
-/*
- * Registers read & write functions.
- */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device *, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device *, uint32_t, uint32_t);
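+
+/*
+ * One indirect register block: @lock serializes the INDEX/DATA register
+ * pair, @rreg/@wreg are the ASIC-specific accessors (NULL when the block
+ * is not implemented on a given ASIC).
+ */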
+struct amdgpu_reg_ind {
+ spinlock_t lock;
+ amdgpu_rreg_t rreg;
+ amdgpu_wreg_t wreg;
+};
+
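+/* groups the indirect register access blocks; currently only SMC */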
+struct amdgpu_reg_access {
+ struct amdgpu_reg_ind smc;
+};
+
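+/*
+ * The accessors below tolerate unimplemented blocks: they warn once and
+ * read back 0 / drop the write instead of calling a NULL callback.
+ */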
+void amdgpu_reg_access_init(struct amdgpu_device *adev);
+uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_reg_smc_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+
typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device *, uint64_t, uint32_t);
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
r = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
WREG32(mmSMC_IND_DATA_0, (v));
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
}
static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_0, 0);
WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return true;
}
{
struct amdgpu_device *adev = ip_block->adev;
- adev->smc_rreg = &cik_smc_rreg;
- adev->smc_wreg = &cik_smc_wreg;
+ adev->reg.smc.rreg = cik_smc_rreg;
+ adev->reg.smc.wreg = cik_smc_wreg;
adev->pcie_rreg = &cik_pcie_rreg;
adev->pcie_wreg = &cik_pcie_wreg;
adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
- adev->smc_rreg = NULL;
- adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
r = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
WREG32(mmSMC_IND_DATA_0, (v));
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
}
static u32 si_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->smc_rreg = &si_smc_rreg;
- adev->smc_wreg = &si_smc_wreg;
+ adev->reg.smc.rreg = si_smc_rreg;
+ adev->reg.smc.wreg = si_smc_wreg;
adev->pcie_rreg = &si_pcie_rreg;
adev->pcie_wreg = &si_pcie_wreg;
adev->pciep_rreg = &si_pciep_rreg;
struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
- adev->smc_rreg = NULL;
- adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
- adev->smc_rreg = NULL;
- adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
- adev->smc_rreg = NULL;
- adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
{
struct amdgpu_device *adev = ip_block->adev;
- adev->smc_rreg = NULL;
- adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
}
/* smu_8_0_d.h */
unsigned long flags;
u32 r;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmMP0PUB_IND_INDEX, (reg));
r = RREG32(mmMP0PUB_IND_DATA);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return r;
}
{
unsigned long flags;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmMP0PUB_IND_INDEX, (reg));
WREG32(mmMP0PUB_IND_DATA, (v));
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
}
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_11, 0);
WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return true;
}
struct amdgpu_device *adev = ip_block->adev;
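+ /* APUs route SMC access through MP0PUB_IND; dGPUs use SMC_IND_*_11 */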
if (adev->flags & AMD_IS_APU) {
- adev->smc_rreg = &cz_smc_rreg;
- adev->smc_wreg = &cz_smc_wreg;
+ adev->reg.smc.rreg = cz_smc_rreg;
+ adev->reg.smc.wreg = cz_smc_wreg;
} else {
- adev->smc_rreg = &vi_smc_rreg;
- adev->smc_wreg = &vi_smc_wreg;
+ adev->reg.smc.rreg = vi_smc_rreg;
+ adev->reg.smc.wreg = vi_smc_wreg;
}
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
addr = smc_start_address;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
}
done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}
if (ucode_size & 3)
return -EINVAL;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
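+ /* auto-increment the index so back-to-back DATA writes stream the ucode */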
WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
ucode_size -= 4;
}
WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return 0;
}
unsigned long flags;
int ret;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
*value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}
unsigned long flags;
int ret;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}