int req_cm_idx = rdev->pm.requested_clock_mode_index;
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (voltage->type == VOLTAGE_SW) {
/* 0xff0x are flags rather than an actual voltage */
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+ drm_dbg(ddev, "Setting: vddc: %d\n", voltage->voltage);
}
/* starting with BTC, there is one state that is used for both
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
rdev->pm.current_vddci = voltage->vddci;
- DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+ drm_dbg(ddev, "Setting: vddci: %d\n", voltage->vddci);
}
}
}
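This hunk, like the rest of the series, obtains the drm_device through the rdev_to_drm() helper. A minimal sketch of that helper, assuming radeon_device still carries its drm_device back-pointer in a ddev field (the upstream version lives in drivers/gpu/drm/radeon/radeon.h):

/* Sketch only -- assumes radeon_device keeps ->ddev, as radeon.h does. */
static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev)
{
	return rdev->ddev;
}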
u32 pipe_offset = radeon_crtc->crtc_id * 16;
u32 tmp, arb_control3;
fixed20_12 a, b, c;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (radeon_crtc->base.enabled && num_heads && mode) {
active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
!evergreen_check_latency_hiding(&wm_high) ||
(rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority a to high\n");
+ drm_dbg_kms(ddev, "force priority a to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
}
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
!evergreen_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority b to high\n");
+ drm_dbg_kms(ddev, "force priority b to high\n");
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
{
u32 tmp;
int r;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
WREG32(VM_CONTEXT1_CNTL, 0);
evergreen_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(ddev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
unsigned stream_ctrl;
unsigned fifo_ctrl;
unsigned counter = 0;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
- DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ drm_err(ddev, "invalid dig_fe %d\n", dig_fe);
return;
}
stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
evergreen_dp_offsets[dig_fe]);
if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
- DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ drm_err(ddev, "dig %d , should be enable\n", dig_fe);
return;
}
evergreen_dp_offsets[dig_fe]);
}
if (counter >= 32)
- DRM_ERROR("counter exceeds %d\n", counter);
+ drm_err(ddev, "counter exceeds %d\n", counter);
fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
+ struct drm_device *ddev = rdev_to_drm(rdev);
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ drm_err(ddev, "radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ drm_err(ddev, "radeon: cp failed to lock ring (%d).\n", r);
return r;
}
{
u32 reset_mask = 0;
u32 tmp;
+ struct drm_device *ddev = rdev_to_drm(rdev);
/* GRBM_STATUS */
tmp = RREG32(GRBM_STATUS);
/* Skip MC reset as it's most likely not hung, just busy */
if (reset_mask & RADEON_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ drm_dbg(ddev, "MC busy: 0x%08X, clearing.\n", reset_mask);
reset_mask &= ~RADEON_RESET_MC;
}
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1 = 0;
u32 thermal_int = 0;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ drm_dbg(ddev, "%s : sw int gfx\n", __func__);
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
- DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+ drm_dbg(ddev, "%s : sw int cp1\n", __func__);
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
- DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+ drm_dbg(ddev, "%s : sw int cp2\n", __func__);
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ drm_dbg(ddev, "%s : sw int gfx\n", __func__);
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
}
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
- DRM_DEBUG("r600_irq_set: sw int dma\n");
+ drm_dbg(ddev, "r600_irq_set: sw int dma\n");
dma_cntl |= TRAP_ENABLE;
}
if (rdev->family >= CHIP_CAYMAN) {
dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
- DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ drm_dbg(ddev, "r600_irq_set: sw int dma1\n");
dma_cntl1 |= TRAP_ENABLE;
}
}
if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
+ drm_dbg(ddev, "dpm thermal\n");
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
}
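All of the conversions above rely on the device-aware logging macros from include/drm/drm_print.h. As a rough approximation (not the upstream definitions, which route through __drm_printk() and per-category filtering), they behave like:

/*
 * Simplified sketch of the drm_print.h macros -- the real drm_dbg()
 * and drm_dbg_kms() additionally filter on drm.debug category bits
 * (DRM_UT_DRIVER and DRM_UT_KMS respectively).
 */
#define drm_dbg(drm, fmt, ...) \
	dev_dbg((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
#define drm_err(drm, fmt, ...) \
	dev_err((drm)->dev, "[drm] *ERROR* " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
	dev_info((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)

The practical gain over the old DRM_DEBUG()/DRM_ERROR() family is the dev_*() prefix: on multi-GPU systems each line is tagged with the device that emitted it.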
bool queue_thermal = false;
u32 status, addr;
const char *event_name;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ drm_dbg(ddev, "%s start: rptr %d, wptr %d\n", __func__, rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
mask = LB_D1_VLINE_INTERRUPT;
event_name = "vline";
} else {
- DRM_DEBUG("Unhandled interrupt: %d %d\n",
+ drm_dbg(ddev, "Unhandled interrupt: %d %d\n",
src_id, src_data);
break;
}
if (!(disp_int[crtc_idx] & mask)) {
- DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
+ drm_dbg(ddev, "IH: D%d %s - IH event w/o asserted irq bit?\n",
crtc_idx + 1, event_name);
}
disp_int[crtc_idx] &= ~mask;
- DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
+ drm_dbg(ddev, "IH: D%d %s\n", crtc_idx + 1, event_name);
break;
case 8: /* D1 page flip */
case 14: /* D4 page flip */
case 16: /* D5 page flip */
case 18: /* D6 page flip */
- DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ drm_dbg(ddev, "IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
if (radeon_use_pflipirq > 0)
radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
event_name = "HPD_RX";
} else {
- DRM_DEBUG("Unhandled interrupt: %d %d\n",
+ drm_dbg(ddev, "Unhandled interrupt: %d %d\n",
src_id, src_data);
break;
}
if (!(disp_int[hpd_idx] & mask))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+ drm_dbg(ddev, "IH: IH event w/o asserted irq bit?\n");
disp_int[hpd_idx] &= ~mask;
- DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
+ drm_dbg(ddev, "IH: %s%d\n", event_name, hpd_idx + 1);
break;
case 44: /* hdmi */
afmt_idx = src_data;
if (afmt_idx > 5) {
- DRM_ERROR("Unhandled interrupt: %d %d\n",
+ drm_err(ddev, "Unhandled interrupt: %d %d\n",
src_id, src_data);
break;
}
if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+ drm_dbg(ddev, "IH: IH event w/o asserted irq bit?\n");
afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
- DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+ drm_dbg(ddev, "IH: HDMI%d\n", afmt_idx + 1);
break;
case 96:
- DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+ drm_err(ddev, "SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
WREG32(SRBM_INT_ACK, 0x1);
break;
case 124: /* UVD */
- DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ drm_dbg(ddev, "IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 146:
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
- DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ drm_dbg(ddev, "IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
- DRM_DEBUG("IH: CP EOP\n");
+ drm_dbg(ddev, "IH: CP EOP\n");
if (rdev->family >= CHIP_CAYMAN) {
switch (src_data) {
case 0:
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 224: /* DMA trap event */
- DRM_DEBUG("IH: DMA trap\n");
+ drm_dbg(ddev, "IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
+ drm_dbg(ddev, "IH: thermal low to high\n");
rdev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
+ drm_dbg(ddev, "IH: thermal high to low\n");
rdev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
case 233: /* GUI IDLE */
- DRM_DEBUG("IH: GUI idle\n");
+ drm_dbg(ddev, "IH: GUI idle\n");
break;
case 244: /* DMA trap event */
if (rdev->family >= CHIP_CAYMAN) {
- DRM_DEBUG("IH: DMA1 trap\n");
+ drm_dbg(ddev, "IH: DMA1 trap\n");
radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
}
break;
default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ drm_dbg(ddev, "Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
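Note the debug-category shift this hunk implies: DRM_DEBUG() logged under DRM_UT_CORE (drm.debug bit 0x1), whereas drm_dbg() logs under DRM_UT_DRIVER (bit 0x2); drm_dbg_kms() remains on DRM_UT_KMS (bit 0x4). After this patch the messages above are enabled with e.g. drm.debug=0x6 on the kernel command line, or by writing that value to /sys/module/drm/parameters/debug at runtime.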
{
struct radeon_ring *ring;
int r;
+ struct drm_device *ddev = rdev_to_drm(rdev);
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load MC firmware!\n");
+ drm_err(ddev, "Failed to load MC firmware!\n");
return r;
}
}
rdev->rlc.cs_data = evergreen_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
- DRM_ERROR("Failed to init rlc BOs!\n");
+ drm_err(ddev, "Failed to init rlc BOs!\n");
return r;
}
}
r = r600_irq_init(rdev);
if (r) {
- DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ drm_err(ddev, "radeon: IH init failed (%d).\n", r);
radeon_irq_kms_fini(rdev);
return r;
}
r = radeon_audio_init(rdev);
if (r) {
- DRM_ERROR("radeon: audio init failed\n");
+ drm_err(ddev, "radeon: audio init failed\n");
return r;
}
int evergreen_resume(struct radeon_device *rdev)
{
int r;
+ struct drm_device *ddev = rdev_to_drm(rdev);
/* reset the asic, the gfx blocks are often in a bad state
* after the driver is unloaded or after a resume
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
- DRM_ERROR("evergreen startup failed on resume\n");
+ drm_err(ddev, "evergreen startup failed on resume\n");
rdev->accel_working = false;
return r;
}
int evergreen_init(struct radeon_device *rdev)
{
int r;
+ struct drm_device *ddev = rdev_to_drm(rdev);
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
}
- DRM_INFO("GPU not posted. posting now...\n");
+ drm_info(ddev, "GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
/* init golden registers */
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ drm_err(ddev, "Failed to load firmware!\n");
return r;
}
}
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ drm_err(ddev, "Failed to load firmware!\n");
return r;
}
}
*/
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
- DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ drm_err(ddev, "radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
}
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if (radeon_pcie_gen2 == 0)
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
- DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ drm_info(ddev, "PCIE gen 2 link speeds already enabled\n");
return;
}
- DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+ drm_info(ddev, "enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {