size = *(u16 *) info->buffer.pointer;
if (size < 3) {
- DRM_INFO("ATCS buffer is too small: %zu\n", size);
+ drm_info(adev_to_drm(adev),
+ "ATCS buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, ¶ms);
if (!info) {
- DRM_ERROR("ATCS PSC update failed\n");
+ drm_err(adev_to_drm(adev), "ATCS PSC call failed\n");
return -EIO;
}
xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info),
GFP_KERNEL);
- if (!xcc_info) {
- DRM_ERROR("Failed to allocate memory for xcc info\n");
+ if (!xcc_info)
return -ENOMEM;
- }
INIT_LIST_HEAD(&xcc_info->list);
xcc_info->handle = acpi_device_handle(acpi_dev);
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
if (ret) {
- DRM_ERROR("amdgpu: failed to schedule IB.\n");
+ drm_err(adev_to_drm(adev), "failed to schedule IB.\n");
goto err_ib_sched;
}
strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
- DRM_ERROR("SMC firmware not supported\n");
+ drm_err(adev_to_drm(adev), "SMC firmware not supported\n");
return -EINVAL;
}
AMDGPU_UCODE_REQUIRED,
"%s", fw_name);
if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+ drm_err(adev_to_drm(adev),
+ "Failed to load firmware \"%s\"\n", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
return err;
}
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
if (!cgs_device) {
- DRM_ERROR("Couldn't allocate CGS device structure\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate CGS device structure\n");
return NULL;
}
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
- DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
- connector->name);
+ drm_err(connector->dev,
+ "%s: probed a monitor but no|invalid EDID\n",
+ connector->name);
ret = connector_status_connected;
} else {
amdgpu_connector->use_digital =
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
- DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+ drm_err(adev_to_drm(adev), "%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
broken_edid = true; /* defer use_digital to later */
if (router->ddc_valid || router->cd_valid) {
amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info);
if (!amdgpu_connector->router_bus)
- DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
if (is_dp_bridge) {
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
- DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
switch (connector_type) {
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
- DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
- DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
- DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
- DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
- DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
- DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "eDP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
- DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ drm_err(adev_to_drm(adev),
+ "LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(r)) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ drm_err(adev_to_drm(adev),
+ "failed to reserve CSA,PD BOs: err=%d\n", r);
goto error;
}
}
AMDGPU_PTE_EXECUTABLE);
if (r) {
- DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ drm_err(adev_to_drm(adev),
+ "failed to do bo_map on static CSA, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
goto error;
}
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(r)) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ drm_err(adev_to_drm(adev),
+ "failed to reserve CSA,PD BOs: err=%d\n", r);
goto error;
}
}
r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
if (r) {
- DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
+ drm_err(adev_to_drm(adev),
+ "failed to do bo_unmap on static CSA, err=%d\n", r);
goto error;
}
struct drm_sched_entity *ctx_entity;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
- DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
+ drm_err(adev_to_drm(ctx->mgr->adev),
+ "unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
}
/* Right now all IPs have only one instance - multiple rings. */
if (instance != 0) {
- DRM_DEBUG("invalid ip instance: %d\n", instance);
+ drm_dbg(adev_to_drm(ctx->mgr->adev),
+ "invalid ip instance: %d\n", instance);
return -EINVAL;
}
if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
- DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
+ drm_dbg(adev_to_drm(ctx->mgr->adev),
+ "invalid ring: %d %d\n", hw_ip, ring);
return -EINVAL;
}
r = dma_fence_wait(other, true);
if (r < 0 && r != -ERESTARTSYS)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ drm_err(adev_to_drm(ctx->mgr->adev),
+ "AMDGPU: Error waiting for fence in ctx %p\n", ctx);
dma_fence_put(other);
return r;
idr_for_each_entry(idp, ctx, id) {
if (kref_read(&ctx->refcount) != 1) {
- DRM_ERROR("ctx %p is still alive\n", ctx);
+ drm_err(adev_to_drm(mgr->adev), "ctx %p is still alive\n", ctx);
continue;
}
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
if (r) {
- DRM_WARN("failed to preempt ring %d\n", ring->idx);
+ drm_warn(adev_to_drm(adev), "failed to preempt ring %d\n", ring->idx);
goto failure;
}
if (atomic_read(&ring->fence_drv.last_seq) !=
ring->fence_drv.sync_seq) {
- DRM_INFO("ring %d was preempted\n", ring->idx);
+ drm_info(adev_to_drm(adev), "ring %d was preempted\n", ring->idx);
amdgpu_ib_preempt_mark_partial_job(ring);
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
if (IS_ERR(ent)) {
- DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
+ drm_err(adev_to_drm(adev),
+ "unable to create amdgpu_preempt_ib debugsfs file\n");
return PTR_ERR(ent);
}
ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
&fops_sclk_set);
if (IS_ERR(ent)) {
- DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+ drm_err(adev_to_drm(adev),
+ "unable to create amdgpu_set_sclk debugsfs file\n");
return PTR_ERR(ent);
}
r = amdgpu_debugfs_regs_init(adev);
if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
+ drm_err(adev_to_drm(adev), "registering register debugfs failed (%d).\n", r);
amdgpu_debugfs_firmware_init(adev);
amdgpu_ta_if_debugfs_init(adev);
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
-
- if (!coredump) {
- DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
+ if (!coredump)
return;
- }
coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
dev_info(
tmp_adev->dev,
- "GPU reset(%d) failed with error %d \n",
+ "GPU reset(%d) failed with error %d\n",
atomic_read(
&tmp_adev->gpu_reset_counter),
tmp_adev->asic_reset_res);
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
- DRM_INFO("This hardware requires experimental hardware support.\n"
+ dev_info(&pdev->dev, "This hardware requires experimental hardware support.\n"
"See modparam exp_hw_support\n");
return -ENODEV;
}
retry_init:
ret = drm_dev_register(ddev, flags);
if (ret == -EAGAIN && ++retry <= 3) {
- DRM_INFO("retry init %d\n", retry);
+ drm_info(adev_to_drm(adev), "retry init %d\n", retry);
/* Don't request EX mode too frequently which is attacking */
msleep(5000);
goto retry_init;
struct FW_ATT_RECORD fw_att_record = {0};
if (size < sizeof(struct FW_ATT_RECORD)) {
- DRM_WARN("FW attestation input buffer not enough memory");
+ drm_warn(adev_to_drm(adev), "FW attestation input buffer not enough memory");
return -EINVAL;
}
if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
- DRM_WARN("FW attestation out of bounds");
+ drm_warn(adev_to_drm(adev), "FW attestation out of bounds");
return 0;
}
if (psp_get_fw_attestation_records_addr(&adev->psp, &records_addr)) {
- DRM_WARN("Failed to get FW attestation record address");
+ drm_warn(adev_to_drm(adev), "Failed to get FW attestation record address");
return -EINVAL;
}
false);
if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) {
- DRM_WARN("Invalid FW attestation cookie");
+ drm_warn(adev_to_drm(adev), "Invalid FW attestation cookie");
return -EINVAL;
}
- DRM_INFO("FW attestation version = 0x%X", fw_att_hdr.AttDbVersion);
+ drm_info(adev_to_drm(adev), "FW attestation version = 0x%X",
+ fw_att_hdr.AttDbVersion);
}
amdgpu_device_vram_access(adev,
/* Compute table size */
adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
- DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+ drm_info(adev_to_drm(adev), "GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
return 0;
int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
if (ret < 3) {
- DRM_ERROR("amdgpu: could not parse disable_cu\n");
+ drm_err(adev_to_drm(adev), "could not parse disable_cu\n");
return;
}
return;
if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
- DRM_WARN("smart shift update failed\n");
+ drm_warn(dev, "smart shift update failed\n");
amdgpu_acpi_fini(adev);
amdgpu_device_fini_hw(adev);
mutex_lock(&mgpu_info.mutex);
if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
- DRM_ERROR("Cannot register more gpu instance\n");
+ drm_err(adev_to_drm(adev), "Cannot register more gpu instance\n");
mutex_unlock(&mgpu_info.mutex);
return;
}
dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
- DRM_WARN("smart shift update failed\n");
+ drm_warn(dev, "smart shift update failed\n");
out:
if (r)
r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
if (r)
- DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+ drm_warn(adev_to_drm(adev),
+ "Failed to init usermode queue manager (%d), use legacy workload submission only\n",
+ r);
r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
if (r)
adev->gmc.aper_size);
}
- DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+ drm_info(adev_to_drm(adev), "Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->gmc.aper_size >> 20);
- DRM_INFO("RAM width %dbits %s\n",
+ drm_info(adev_to_drm(adev), "RAM width %dbits %s\n",
adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
return 0;
if (amdgpu_ras_query_error_status(adev, &info) != 0)
- DRM_WARN("RAS init harvest failure");
+ drm_warn(adev_to_drm(adev), "RAS init query failure");
if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
- DRM_WARN("RAS init harvest reset failure");
+ drm_warn(adev_to_drm(adev), "RAS init harvest reset failure");
return 0;
}
AMDGPU_GEM_DOMAIN_GTT,
&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr))
- DRM_WARN("Debug VRAM access will use slowpath MM access\n");
+ drm_warn(adev_to_drm(adev),
+ "Debug VRAM access will use slowpath MM access\n");
return 0;
}
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
+ drm_info(adev_to_drm(adev), "Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
- DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
+ drm_info(adev_to_drm(adev), "Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
enc_major, enc_minor, dec_minor, family_id);
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
if (amdgpu_ras_intr_triggered())
- DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+ drm_warn(adev_to_drm(adev),
+ "UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
return 0;
}
ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
output->period_ns);
if (ret_overrun != 1)
- DRM_WARN("%s: vblank timer overrun\n", __func__);
+ drm_warn(amdgpu_crtc->base.dev,
+ "%s: vblank timer overrun count: %llu\n",
+ __func__, ret_overrun);
ret = drm_crtc_handle_vblank(crtc);
/* Don't queue timer again when vblank is disabled. */
bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
- DRM_INFO("amdgpu atom DIG backlight initialized\n");
+ drm_info(adev_to_drm(adev), "ATOM DIG backlight initialized\n");
return;
backlight_device_unregister(bd);
kfree(pdata);
- DRM_INFO("amdgpu atom LVDS backlight unloaded\n");
+ drm_info(adev_to_drm(adev), "ATOM LVDS backlight unloaded\n");
}
}
uint32_t bios_0_scratch;
if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) {
- DRM_DEBUG_KMS("detect returned false \n");
+ DRM_DEBUG_KMS("detect returned false\n");
return connector_status_unknown;
}
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
- DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ drm_info(adev_to_drm(adev), "PCIE gen 3 link speeds already enabled\n");
return;
}
- DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ drm_info(adev_to_drm(adev), "enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
- DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ drm_info(adev_to_drm(adev), "PCIE gen 2 link speeds already enabled\n");
return;
}
- DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ drm_info(adev_to_drm(adev), "enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
}
if (!adev->gfx.cp_fw_write_wait)
- DRM_WARN_ONCE("CP firmware version too old, please update!");
+ drm_warn_once(adev_to_drm(adev), "CP firmware version too old, please update!");
}
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_ERROR("Illegal opcode in command stream \n");
+ DRM_ERROR("Illegal opcode in command stream\n");
gfx_v10_0_handle_priv_fault(adev, entry);
return 0;
}
adev->gfx.is_poweron = true;
if(get_gb_addr_config(adev))
- DRM_WARN("Invalid gb_addr_config !\n");
+ drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
adev->gfx.rs64_enable)
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_ERROR("Illegal opcode in command stream \n");
+ DRM_ERROR("Illegal opcode in command stream\n");
gfx_v11_0_handle_priv_fault(adev, entry);
return 0;
}
adev->gfx.is_poweron = true;
if (get_gb_addr_config(adev))
- DRM_WARN("Invalid gb_addr_config !\n");
+ drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
gfx_v12_0_config_gfx_rs64(adev);
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_ERROR("Illegal opcode in command stream \n");
+ DRM_ERROR("Illegal opcode in command stream\n");
gfx_v12_0_handle_priv_fault(adev, entry);
return 0;
}
static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
- DRM_INFO("Not implemented\n");
+ drm_info(adev_to_drm(adev), "Not implemented\n");
}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
if (adev->gfx.ce_feature_version >= 46 &&
adev->gfx.pfp_feature_version >= 46) {
adev->virt.chained_ib_support = true;
- DRM_INFO("Chained IB support enabled!\n");
+ drm_info(adev_to_drm(adev), "Chained IB support enabled!\n");
} else
adev->virt.chained_ib_support = false;
gfx_v8_0_select_se_sh(adev, 0xffffffff,
0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n",
i, j);
return;
}
switch (enc) {
case 0:
- DRM_INFO("SQ general purpose intr detected:"
+ drm_info(adev_to_drm(adev), "SQ general purpose intr detected:"
"se_id %d, immed_overflow %d, host_reg_overflow %d,"
"host_cmd_overflow %d, cmd_timestamp %d,"
"reg_timestamp %d, thread_trace_buff_full %d,"
else
sprintf(type, "EDC/ECC error");
- DRM_INFO(
+ drm_info(adev_to_drm(adev),
"SQ %s detected: "
"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
"trap %s, sq_ed_info.source %s.\n",
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46)))
- DRM_WARN_ONCE("CP firmware version too old, please update!");
+ drm_warn_once(adev_to_drm(adev),
+ "CP firmware version too old, please update!");
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 0, 1):
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
- DRM_INFO("fix gfx.config for vega12\n");
+ drm_info(adev_to_drm(adev), "fix gfx.config for vega12\n");
break;
case IP_VERSION(9, 4, 0):
adev->gfx.ras = &gfx_v9_0_ras;
amdgpu_gfx_select_se_sh(adev, 0xffffffff,
0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n",
i, j);
return;
}
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
if(rlc_ucode_ver == 0x108) {
- DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
+ drm_info(adev_to_drm(adev), "Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i\n",
rlc_ucode_ver, adev->gfx.rlc_fw_version);
/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
* default is 0x9C4 to create a 100us interval */
*/
if (adev->flags & AMD_IS_APU &&
adev->in_s3 && !pm_resume_via_firmware()) {
- DRM_INFO("Will skip the CSB packet resubmit\n");
+ drm_info(adev_to_drm(adev), "Will skip the CSB packet resubmit\n");
return 0;
}
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
return r;
}
if (i >= adev->usec_timeout) {
r = -EINVAL;
- DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
+ drm_warn(adev_to_drm(adev), "ring %d timeout to preempt ib\n", ring->idx);
}
/*reset the CP_VMID_PREEMPT after trailing fence*/
0xffffffff,
0xffffffff, xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n",
i, j);
return;
}
rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
if (rlc_ucode_ver == 0x108) {
dev_info(adev->dev,
- "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
+ "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i\n",
rlc_ucode_ver, adev->gfx.rlc_fw_version);
/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
* default is 0x9C4 to create a 100us interval */
}
if (!time)
- DRM_WARN("failed to wait for GRBM(EA) idle\n");
+ drm_warn(adev_to_drm(adev), "failed to wait for GRBM(EA) idle\n");
}
const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
if (!adev->in_s0ix)
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
adev->mmhub.funcs->set_fault_enable_default(adev, value);
adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
- dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
gmc_v6_0_set_fault_enable_default(adev, true);
gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
- dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
tmp = RREG32(mmCONFIG_MEMSIZE);
/* some boards may have garbage in the upper 16 bits */
if (tmp & 0xffff0000) {
- DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ drm_info(adev_to_drm(adev), "Probably bad vram size: 0x%08x\n", tmp);
if (tmp & 0xffff)
tmp &= 0xffff;
}
gmc_v8_0_set_fault_enable_default(adev, true);
gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
*/
mtype_local = MTYPE_RW;
if (amdgpu_mtype_local == 1) {
- DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
+ drm_info_once(adev_to_drm(adev), "Using MTYPE_NC for local memory\n");
mtype_local = MTYPE_NC;
} else if (amdgpu_mtype_local == 2) {
- DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
+ drm_info_once(adev_to_drm(adev), "Using MTYPE_CC for local memory\n");
mtype_local = MTYPE_CC;
} else {
- DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
+ drm_info_once(adev_to_drm(adev), "Using MTYPE_RW for local memory\n");
}
is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
num_possible_nodes() <= 1) ||
if (r)
return r;
- DRM_INFO("PCIE GART of %uM enabled.\n",
+ drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled.\n",
(unsigned int)(adev->gmc.gart_size >> 20));
if (adev->gmc.pdb0_bo)
- DRM_INFO("PDB0 located at 0x%016llX\n",
+ drm_info(adev_to_drm(adev), "PDB0 located at 0x%016llX\n",
(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
- DRM_INFO("PTB located at 0x%016llX\n",
+ drm_info(adev_to_drm(adev), "PTB located at 0x%016llX\n",
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
return 0;
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
- DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
+ drm_err(adev_to_drm(adev), "failed to disable JPEG power gating\n");
return r;
}
}
break;
default:
- DRM_ERROR("unsupported misc op (%d) \n", input->op);
+ drm_err(adev_to_drm(mes->adev), "unsupported misc op (%d)\n", input->op);
return -EINVAL;
}
break;
default:
- DRM_ERROR("unsupported misc op (%d) \n", input->op);
+ DRM_ERROR("unsupported misc op (%d)\n", input->op);
return -EINVAL;
}
timeout -= 5;
} while (timeout > 1);
- dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec \n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+ dev_err(adev->dev,
+ "Doesn't get TRN_MSG_ACK from pf in %d msec\n",
+ NV_MAILBOX_POLL_ACK_TIMEDOUT);
return -ETIME;
}
static int psp_v10_0_mode1_reset(struct psp_context *psp)
{
- DRM_INFO("psp mode 1 reset not supported now! \n");
+ drm_info(adev_to_drm(psp->adev), "psp mode 1 reset not supported now!\n");
return -EINVAL;
}
MBOX_TOS_READY_MASK, 0);
if (ret) {
- DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n");
return -EINVAL;
}
MBOX_TOS_READY_MASK, 0);
if (ret) {
- DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n");
return -EINVAL;
}
0);
if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
+ drm_info(adev_to_drm(adev), "psp mode 1 reset failed!\n");
return -EINVAL;
}
- DRM_INFO("psp mode1 reset succeed \n");
+ drm_info(adev_to_drm(adev), "psp mode1 reset succeed\n");
return 0;
}
ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
- DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n");
return -EINVAL;
}
ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
+ drm_info(adev_to_drm(adev), "psp mode 1 reset failed!\n");
return -EINVAL;
}
- DRM_INFO("psp mode1 reset succeed \n");
+ drm_info(adev_to_drm(adev), "psp mode1 reset succeed\n");
return 0;
}
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
- DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ drm_info(adev_to_drm(adev), "PCIE gen 3 link speeds already enabled\n");
return;
}
- DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ drm_info(adev_to_drm(adev), "enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
- DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ drm_info(adev_to_drm(adev), "PCIE gen 2 link speeds already enabled\n");
return;
}
- DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ drm_info(adev_to_drm(adev), "enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block)
{
- DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
+ drm_info(adev_to_drm(ip_block->adev), "si_dma_soft_reset --- not implemented !!!!!!!\n");
return 0;
}
if (REG_GET_FIELD(reg, CKSVII2C_IC_INTR_STAT, R_TX_ABRT) == 1) {
reg_c_tx_abrt_source = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TX_ABRT_SOURCE);
- DRM_INFO("TX was terminated, IC_TX_ABRT_SOURCE val is:%x", reg_c_tx_abrt_source);
+ drm_info(adev_to_drm(adev),
+ "TX was terminated, IC_TX_ABRT_SOURCE val is:%x",
+ reg_c_tx_abrt_source);
/* Check for stop due to NACK */
if (REG_GET_FIELD(reg_c_tx_abrt_source,
uint8_t data[6] = {0xf, 0, 0xde, 0xad, 0xbe, 0xef};
- DRM_INFO("Begin");
+ drm_info(adev_to_drm(adev), "Begin");
if (!smu_v11_0_i2c_bus_lock(control)) {
DRM_ERROR("Failed to lock the bus!.");
smu_v11_0_i2c_bus_unlock(control);
- DRM_INFO("End");
+ drm_info(adev_to_drm(adev), "End");
return true;
}
#endif
if ((adev->asic_type == CHIP_ARCTURUS) &&
amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- DRM_WARN("Fail to disable DF-Cstate.\n");
+ drm_warn(adev_to_drm(adev),
+ "Fail to disable DF-Cstate.\n");
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
if ((adev->asic_type == CHIP_ARCTURUS) &&
amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- DRM_WARN("Fail to enable DF-Cstate\n");
+ drm_warn(adev_to_drm(adev), "Fail to enable DF-Cstate\n");
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
if ((adev->asic_type == CHIP_ARCTURUS) &&
amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- DRM_WARN("Fail to disable DF-Cstate.\n");
+ drm_warn(adev_to_drm(adev), "Fail to disable DF-Cstate.\n");
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
if ((adev->asic_type == CHIP_ARCTURUS) &&
amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- DRM_WARN("Fail to enable DF-Cstate\n");
+ drm_warn(adev_to_drm(adev), "Fail to enable DF-Cstate\n");
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
done:
if (!r)
- DRM_INFO("UVD initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "UVD initialized successfully.\n");
return r;
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
- DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
+ drm_info(adev_to_drm(adev), "Cannot get clockgating state when UVD is powergated.\n");
goto out;
}
adev->uvd.inst->irq.num_types = 1;
adev->uvd.num_enc_rings = 0;
- DRM_INFO("UVD ENC is disabled\n");
+ drm_info(adev_to_drm(adev), "UVD ENC is disabled\n");
}
ring = &adev->uvd.inst->ring;
done:
if (!r) {
if (uvd_v6_0_enc_support(adev))
- DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "UVD and UVD ENC initialized successfully.\n");
else
- DRM_INFO("UVD initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "UVD initialized successfully.\n");
}
return r;
data = RREG32_SMC(ixCURRENT_PG_STATUS);
if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
- DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
+ drm_info(adev_to_drm(adev), "Cannot get clockgating state when UVD is powergated.\n");
goto out;
}
{
if (adev->asic_type >= CHIP_POLARIS10) {
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
- DRM_INFO("UVD is enabled in VM mode\n");
+ drm_info(adev_to_drm(adev), "UVD is enabled in VM mode\n");
} else {
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
- DRM_INFO("UVD is enabled in physical mode\n");
+ drm_info(adev_to_drm(adev), "UVD is enabled in physical mode\n");
}
}
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
- DRM_INFO("UVD ENC is enabled in VM mode\n");
+ drm_info(adev_to_drm(adev), "UVD ENC is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
- DRM_INFO("PSP loading UVD firmware\n");
+ drm_info(adev_to_drm(adev), "PSP loading UVD firmware\n");
}
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
}
done:
if (!r)
- DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "UVD and UVD ENC initialized successfully.\n");
return r;
}
continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
- DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
+ drm_info(adev_to_drm(adev), "UVD(%d) is enabled in VM mode\n", i);
}
}
adev->uvd.inst[j].ring_enc[i].me = j;
}
- DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
+ drm_info(adev_to_drm(adev), "UVD(%d) ENC is enabled in VM mode\n", j);
}
}
if (vce_v2_0_lmi_clean(adev)) {
- DRM_INFO("VCE is not idle \n");
+ drm_info(adev_to_drm(adev), "VCE is not idle\n");
return 0;
}
return -EINVAL;
if (vce_v2_0_wait_for_idle(ip_block)) {
- DRM_INFO("VCE is busy, Can't set clock gating");
+ drm_info(adev_to_drm(adev), "VCE is busy, Can't set clock gating");
return 0;
}
return r;
}
- DRM_INFO("VCE initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "VCE initialized successfully.\n");
return 0;
}
return r;
}
- DRM_INFO("VCE initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "VCE initialized successfully.\n");
return 0;
}
data = RREG32_SMC(ixCURRENT_PG_STATUS);
if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
- DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
+ drm_info(adev_to_drm(adev), "Cannot get clockgating state when VCE is powergated.\n");
goto out;
}
adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
adev->vce.ring[i].me = i;
}
- DRM_INFO("VCE enabled in VM mode\n");
+ drm_info(adev_to_drm(adev), "VCE enabled in VM mode\n");
} else {
for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
adev->vce.ring[i].me = i;
}
- DRM_INFO("VCE enabled in physical mode\n");
+ drm_info(adev_to_drm(adev), "VCE enabled in physical mode\n");
}
}
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- DRM_INFO("PSP loading VCE firmware\n");
+ drm_info(adev_to_drm(adev), "PSP loading VCE firmware\n");
} else {
r = amdgpu_vce_resume(adev);
if (r)
return r;
}
- DRM_INFO("VCE initialized successfully.\n");
+ drm_info(adev_to_drm(adev), "VCE initialized successfully.\n");
return 0;
}
adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
adev->vce.ring[i].me = i;
}
- DRM_INFO("VCE enabled in VM mode\n");
+ drm_info(adev_to_drm(adev), "VCE enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
continue;
if (idx >= AMDGPU_DM_MAX_CRTC) {
- DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
+ drm_warn(adev_to_drm(adev),
+ "%s connected connectors exceed max crtc\n",
+ __func__);
mutex_unlock(&ddev->mode_config.mutex);
return;
}
return -EFAULT;
}
- /* check number of parameters. isspace could not differ space and \n */
+ /* check number of parameters; isspace() cannot distinguish a space from '\n' */
while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
/* skip space*/
while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
struct ta_dtm_shared_memory *dtm_cmd;
if (!psp->dtm_context.context.initialized) {
- DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
+ drm_info(adev_to_drm(psp->adev),
+ "Failed to enable ASSR, DTM TA is not initialized.");
return false;
}
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
- DRM_INFO("Failed to enable ASSR");
+ drm_info(adev_to_drm(psp->adev),
+ "Failed to enable ASSR");
return false;
}
sysfs_bin_attr_init(&hdcp_work[0].attr);
if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
- DRM_WARN("Failed to create device file hdcp_srm");
+ drm_warn(adev_to_drm(adev), "Failed to create device file hdcp_srm\n");
return hdcp_work;
}
link->panel_mode = panel_mode;
- DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
- "eDP panel mode enabled: %d \n",
- link->link_index,
- link->dpcd_caps.panel_mode_edp,
- panel_mode_edp);
+ DC_LOG_DETECTION_DP_CAPS("%d eDP panel mode supported: %d, enabled: %d\n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
}
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ drm_err(adev_to_drm(adev), "DPM %s uvd failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ drm_err(adev_to_drm(adev), "DPM %s vcn failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ drm_err(adev_to_drm(adev), "DPM %s vce failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
- DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ drm_err(adev_to_drm(adev), "DPM %s jpeg failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
- DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
- enable ? "enable" : "disable", ret);
+ drm_err(adev_to_drm(adev), "DPM %s vpe failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- DRM_INFO("amdgpu: dpm initialized\n");
-
+ drm_info(adev_to_drm(adev), "si dpm initialized\n");
return 0;
dpm_failed:
si_dpm_fini(adev);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
+ drm_err(adev_to_drm(adev), "dpm initialization failed\n");
return ret;
}
&adev->pm.smu_prv_buffer,
&gpu_addr,
&cpu_ptr)) {
- DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
+ drm_err(adev_to_drm(adev), "failed to create smu prv buffer\n");
return;
}
if (r) {
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
adev->pm.smu_prv_buffer = NULL;
- DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
+ drm_err(adev_to_drm(adev), "failed to notify SMU buffer address\n");
}
}
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
- pr_debug("Error in phm_get_clock_info \n");
+ drm_err(adev_to_drm(hwmgr->adev),
+ "Error in phm_get_clock_info\n");
return -EINVAL;
}