diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4515630fb9d23ca9a6d926c81ab76b06d00cae15..eb6c55e9f6b50e6585253becab700c457560ccca 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -85,7 +85,6 @@
 #include <drm/drm_audio_component.h>
 #include <drm/drm_gem_atomic_helper.h>
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 
 #include "dcn/dcn_1_0_offset.h"
@@ -94,7 +93,6 @@
 #include "vega10_ip_offset.h"
 
 #include "soc15_common.h"
-#endif
 
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
@@ -605,7 +603,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 /**
  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
@@ -773,7 +770,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 
                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
-                       if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+                       if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
@@ -829,7 +826,6 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
-#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
@@ -1127,9 +1123,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
-#endif
                break;
        default:
                break;
@@ -1191,7 +1185,6 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
        }
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
        uint64_t pt_base;
@@ -1246,8 +1239,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        pa_config->is_hvm_enabled = 0;
 
 }
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
 static void vblank_control_worker(struct work_struct *work)
 {
        struct vblank_control_work *vblank_work =
@@ -1284,8 +1276,6 @@ static void vblank_control_worker(struct work_struct *work)
        kfree(vblank_work);
 }
 
-#endif
-
 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 {
        struct hpd_rx_irq_offload_work *offload_work;
@@ -1412,9 +1402,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
-#endif
 
        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1507,12 +1495,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
                init_data.flags.edp_no_power_sequencing = true;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN
        if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
        if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
-#endif
 
        init_data.flags.seamless_boot_edp_requested = false;
 
@@ -1568,7 +1554,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
                struct dc_phy_addr_space_config pa_config;
 
@@ -1577,7 +1562,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                // Call the DC init_memory func
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
-#endif
 
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
@@ -1589,14 +1573,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        amdgpu_dm_init_color_mod();
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_control_workqueue =
                        create_singlethread_workqueue("dm_vblank_control_workqueue");
                if (!adev->dm.vblank_control_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
        }
-#endif
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -1628,7 +1610,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                }
 
                amdgpu_dm_outbox_init(adev);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
                        dmub_aux_setconfig_callback, false)) {
                        DRM_ERROR("amdgpu: fail to register dmub aux callback");
@@ -1642,7 +1623,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                        DRM_ERROR("amdgpu: fail to register dmub hpd callback");
                        goto error;
                }
-#endif /* CONFIG_DRM_AMD_DC_DCN */
        }
 
        if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1689,12 +1669,10 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 {
        int i;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_control_workqueue) {
                destroy_workqueue(adev->dm.vblank_control_workqueue);
                adev->dm.vblank_control_workqueue = NULL;
        }
-#endif
 
        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
@@ -2405,9 +2383,7 @@ static int dm_suspend(void *handle)
        if (amdgpu_in_reset(adev)) {
                mutex_lock(&dm->dc_lock);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                dc_allow_idle_optimizations(adev->dm.dc, false);
-#endif
 
                dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
 
@@ -3560,7 +3536,6 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
        return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -3749,7 +3724,6 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
 
        return 0;
 }
-#endif
 
 /*
  * Acquires the lock for the atomic state object and returns
@@ -4253,7 +4227,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Use Outbox interrupt */
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 0, 0):
@@ -4286,7 +4259,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        break;
                }
        }
-#endif
 
        /* Disable vblank IRQs aggressively for power-saving. */
        adev_to_drm(adev)->vblank_disable_immediate = true;
@@ -4382,7 +4354,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
                break;
        default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                switch (adev->ip_versions[DCE_HWIP][0]) {
                case IP_VERSION(1, 0, 0):
                case IP_VERSION(1, 0, 1):
@@ -4408,7 +4379,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                                        adev->ip_versions[DCE_HWIP][0]);
                        goto fail;
                }
-#endif
                break;
        }
 
@@ -4557,7 +4527,7 @@ static int dm_early_init(void *handle)
                adev->mode_info.num_dig = 6;
                break;
        default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
                switch (adev->ip_versions[DCE_HWIP][0]) {
                case IP_VERSION(2, 0, 2):
                case IP_VERSION(3, 0, 0):
@@ -4594,7 +4564,6 @@ static int dm_early_init(void *handle)
                                        adev->ip_versions[DCE_HWIP][0]);
                        return -EINVAL;
                }
-#endif
                break;
        }
 
@@ -5413,17 +5382,19 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
 static void
 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
-                              bool *per_pixel_alpha, bool *global_alpha,
-                              int *global_alpha_value)
+                              bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+                              bool *global_alpha, int *global_alpha_value)
 {
        *per_pixel_alpha = false;
+       *pre_multiplied_alpha = true;
        *global_alpha = false;
        *global_alpha_value = 0xff;
 
        if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
                return;
 
-       if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+       if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
+               plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
                static const uint32_t alpha_formats[] = {
                        DRM_FORMAT_ARGB8888,
                        DRM_FORMAT_RGBA8888,
@@ -5438,6 +5409,9 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
                                break;
                        }
                }
+
+               if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+                       *pre_multiplied_alpha = false;
        }
 
        if (plane_state->alpha < 0xffff) {
@@ -5600,7 +5574,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                return ret;
 
        fill_blending_from_plane_state(
-               plane_state, &plane_info->per_pixel_alpha,
+               plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
                &plane_info->global_alpha, &plane_info->global_alpha_value);
 
        return 0;
@@ -5647,6 +5621,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->tiling_info = plane_info.tiling_info;
        dc_plane_state->visible = plane_info.visible;
        dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+       dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
        dc_plane_state->global_alpha = plane_info.global_alpha;
        dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
        dc_plane_state->dcc = plane_info.dcc;
@@ -6648,10 +6623,8 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct amdgpu_display_manager *dm = &adev->dm;
        struct vblank_control_work *work;
-#endif
        int rc = 0;
 
        if (enable) {
@@ -6674,7 +6647,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
        if (amdgpu_in_reset(adev))
                return 0;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dm->vblank_control_workqueue) {
                work = kzalloc(sizeof(*work), GFP_ATOMIC);
                if (!work)
@@ -6692,7 +6664,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 
                queue_work(dm->vblank_control_workqueue, &work->work);
        }
-#endif
 
        return 0;
 }
@@ -7951,7 +7922,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
            plane_cap && plane_cap->per_pixel_alpha) {
                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
-                                         BIT(DRM_MODE_BLEND_PREMULTI);
+                                         BIT(DRM_MODE_BLEND_PREMULTI) |
+                                         BIT(DRM_MODE_BLEND_COVERAGE);
 
                drm_plane_create_alpha_property(plane);
                drm_plane_create_blend_mode_property(plane, blend_caps);
@@ -9361,14 +9333,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                /*
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
                if (dm->vblank_control_workqueue)
                        flush_workqueue(dm->vblank_control_workqueue);
-#endif
 
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
@@ -9706,21 +9676,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        if (dc_state) {
                /* if there mode set or reset, disable eDP PSR */
                if (mode_set_reset_required) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (dm->vblank_control_workqueue)
                                flush_workqueue(dm->vblank_control_workqueue);
-#endif
+
                        amdgpu_dm_psr_disable_all(dm);
                }
 
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               /* Allow idle optimization when vblank count is 0 for display off */
-               if (dm->active_vblank_irq_count == 0)
-                   dc_allow_idle_optimizations(dm->dc,true);
-#endif
+
+               /* Allow idle optimization when vblank count is 0 for display off */
+               if (dm->active_vblank_irq_count == 0)
+                       dc_allow_idle_optimizations(dm->dc, true);
                mutex_unlock(&dm->dc_lock);
        }
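
Beyond the mechanical removal of the CONFIG_DRM_AMD_DC_DCN guards, the functional change in this patch is that overlay planes now advertise DRM_MODE_BLEND_COVERAGE next to DRM_MODE_BLEND_PREMULTI, with fill_blending_from_plane_state() mapping the coverage mode onto the new dc_plane_state->pre_multiplied_alpha flag. The sketch below (not part of the patch) shows how a userspace compositor could select the newly exposed mode through the standard "pixel blend mode" plane property. It is a minimal illustration: fd and plane_id are assumed to come from the caller's existing KMS setup, DRM_CLIENT_CAP_ATOMIC is assumed to be enabled on fd, and whether the plane actually lists "Coverage" still depends on the plane reporting per_pixel_alpha support, as in the amdgpu_dm_plane_init() hunk above.

/*
 * Hypothetical helper: switch an overlay plane to straight (non
 * pre-multiplied) per-pixel alpha via the "pixel blend mode" property.
 * Error handling is trimmed for brevity.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_coverage_blend(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props;
	drmModeAtomicReq *req;
	uint32_t prop_id = 0;
	uint64_t coverage = 0;
	uint32_t i;
	int j, ret = -1;

	/* Find the "pixel blend mode" property and its "Coverage" enum value. */
	props = drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, "pixel blend mode")) {
			prop_id = prop->prop_id;
			for (j = 0; j < prop->count_enums; j++) {
				if (!strcmp(prop->enums[j].name, "Coverage"))
					coverage = prop->enums[j].value;
			}
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);

	/* Property absent: kernel or plane does not expose blend modes. */
	if (!prop_id)
		return -1;

	/* Commit the new blend mode atomically on this plane. */
	req = drmModeAtomicAlloc();
	if (!req)
		return -1;
	if (drmModeAtomicAddProperty(req, plane_id, prop_id, coverage) >= 0)
		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}

With the "Coverage" mode selected, the framebuffer carries straight alpha and the hardware multiplies color by alpha during blending; with "Pre-multiplied" (the previous default and only per-pixel option on these planes), the framebuffer is expected to already contain alpha-multiplied color, which is what pre_multiplied_alpha distinguishes on the DC side.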