From: Greg Kroah-Hartman
Date: Wed, 12 Sep 2018 18:43:20 +0000 (+0200)
Subject: 4.18-stable patches
X-Git-Tag: v4.4.156~23
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a4154c092bd367ae270be85a080e50518db9fb2c;p=thirdparty%2Fkernel%2Fstable-queue.git

4.18-stable patches

added patches:
	drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
	drm-amdgpu-keep-track-of-amount-of-pinned-cpu-visible-vram.patch
	drm-amdgpu-make-pin_size-values-atomic.patch
	drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch
	drm-i915-set-dp-main-stream-attribute-for-color-range-on-ddi-platforms.patch
	x86-tsc-prevent-result-truncation-on-32bit.patch
---

diff --git a/queue-4.18/drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch b/queue-4.18/drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
new file mode 100644
index 00000000000..6e352c20766
--- /dev/null
+++ b/queue-4.18/drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
@@ -0,0 +1,39 @@
+From 456607d816d89a442a3d5ec98b02c8bc950b5228 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?=
+Date: Thu, 19 Jul 2018 17:38:18 +0200
+Subject: drm/amdgpu: Don't warn on destroying a pinned BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer
+
+commit 456607d816d89a442a3d5ec98b02c8bc950b5228 upstream.
+
+The warning turned out to be not so useful, as BO destruction tends to
+be deferred to a workqueue.
+
+Also, we should be preventing any damage from this now, so not really
+important anymore to fix code doing this.
+
+Acked-by: Alex Deucher
+Tested-by: Mike Lothian
+Signed-off-by: Michel Dänzer
+Signed-off-by: Alex Deucher
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -76,7 +76,7 @@ static void amdgpu_ttm_bo_destroy(struct
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ 
+-	if (WARN_ON_ONCE(bo->pin_count > 0))
++	if (bo->pin_count > 0)
+ 		amdgpu_bo_subtract_pin_size(bo);
+ 
+ 	if (bo->kfd_bo)
diff --git a/queue-4.18/drm-amdgpu-keep-track-of-amount-of-pinned-cpu-visible-vram.patch b/queue-4.18/drm-amdgpu-keep-track-of-amount-of-pinned-cpu-visible-vram.patch
new file mode 100644
index 00000000000..a37a7172627
--- /dev/null
+++ b/queue-4.18/drm-amdgpu-keep-track-of-amount-of-pinned-cpu-visible-vram.patch
@@ -0,0 +1,142 @@
+From ddc21af4d0f37f42b33c54cb69b215997fe5b082 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?=
+Date: Wed, 11 Jul 2018 12:06:31 +0200
+Subject: drm/amdgpu: Keep track of amount of pinned CPU visible VRAM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer
+
+commit ddc21af4d0f37f42b33c54cb69b215997fe5b082 upstream.
+
+Instead of CPU invisible VRAM. Preparation for the following, no
+functional change intended.
+
+v2:
+* Also change amdgpu_vram_mgr_bo_invisible_size to
+  amdgpu_vram_mgr_bo_visible_size, allowing further simplification
+  (Christian König)
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Christian König
+Signed-off-by: Michel Dänzer
+Signed-off-by: Alex Deucher
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h          |    2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c      |    5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   |    4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |    2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c |   20 ++++++++------------
+ 5 files changed, 14 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1580,7 +1580,7 @@ struct amdgpu_device {
+ 
+ 	/* tracking pinned memory */
+ 	u64 vram_pin_size;
+-	u64 invisible_pin_size;
++	u64 visible_pin_size;
+ 	u64 gart_pin_size;
+ 
+ 	/* amdkfd interface */
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -497,7 +497,7 @@ static int amdgpu_info_ioctl(struct drm_
+ 		vram_gtt.vram_size = adev->gmc.real_vram_size;
+ 		vram_gtt.vram_size -= adev->vram_pin_size;
+ 		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+-		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
++		vram_gtt.vram_cpu_accessible_size -= adev->visible_pin_size;
+ 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ 		vram_gtt.gtt_size *= PAGE_SIZE;
+ 		vram_gtt.gtt_size -= adev->gart_pin_size;
+@@ -518,8 +518,7 @@ static int amdgpu_info_ioctl(struct drm_
+ 		mem.cpu_accessible_vram.total_heap_size =
+ 			adev->gmc.visible_vram_size;
+ 		mem.cpu_accessible_vram.usable_heap_size =
+-			adev->gmc.visible_vram_size -
+-			(adev->vram_pin_size - adev->invisible_pin_size);
++			adev->gmc.visible_vram_size - adev->visible_pin_size;
+ 		mem.cpu_accessible_vram.heap_usage =
+ 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 		mem.cpu_accessible_vram.max_allocation =
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -762,7 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdg
+ 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ 		adev->vram_pin_size += amdgpu_bo_size(bo);
+-		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
++		adev->visible_pin_size += amdgpu_vram_mgr_bo_visible_size(bo);
+ 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ 		adev->gart_pin_size += amdgpu_bo_size(bo);
+ 	}
+@@ -792,7 +792,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo
+ 
+ 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ 		adev->vram_pin_size -= amdgpu_bo_size(bo);
+-		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
++		adev->visible_pin_size -= amdgpu_vram_mgr_bo_visible_size(bo);
+ 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ 		adev->gart_pin_size -= amdgpu_bo_size(bo);
+ 	}
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+ 
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+ 
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(stru
+ }
+ 
+ /**
+- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+  *
+  * @bo: &amdgpu_bo buffer object (must be in VRAM)
+  *
+  * Returns:
+- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+  */
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 	struct ttm_mem_reg *mem = &bo->tbo.mem;
+ 	struct drm_mm_node *nodes = mem->mm_node;
+ 	unsigned pages = mem->num_pages;
+-	u64 usage = 0;
++	u64 usage;
+ 
+ 	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+-		return 0;
++		return amdgpu_bo_size(bo);
+ 
+ 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+-		return amdgpu_bo_size(bo);
++		return 0;
+ 
+-	while (nodes && pages) {
+-		usage += nodes->size << PAGE_SHIFT;
+-		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+-		pages -= nodes->size;
+-		++nodes;
+-	}
++	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
++		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ 
+ 	return usage;
+ }
diff --git a/queue-4.18/drm-amdgpu-make-pin_size-values-atomic.patch b/queue-4.18/drm-amdgpu-make-pin_size-values-atomic.patch
new file mode 100644
index 00000000000..0168c68bf06
--- /dev/null
+++ b/queue-4.18/drm-amdgpu-make-pin_size-values-atomic.patch
@@ -0,0 +1,143 @@
+From a5ccfe5c20740f2fbf00291490cdf8d2373ec255 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?=
+Date: Wed, 11 Jul 2018 12:00:40 +0200
+Subject: drm/amdgpu: Make pin_size values atomic
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer
+
+commit a5ccfe5c20740f2fbf00291490cdf8d2373ec255 upstream.
+
+Concurrent execution of the non-atomic arithmetic could result in
+completely bogus values.
+
+v2:
+* Rebased on v2 of the previous patch
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/106872
+Reviewed-by: Christian König
+Signed-off-by: Michel Dänzer
+Signed-off-by: Alex Deucher
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h        |    6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     |    2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |   22 +++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |   14 ++++++++------
+ 4 files changed, 23 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1579,9 +1579,9 @@ struct amdgpu_device {
+ 	DECLARE_HASHTABLE(mn_hash, 7);
+ 
+ 	/* tracking pinned memory */
+-	u64 vram_pin_size;
+-	u64 visible_pin_size;
+-	u64 gart_pin_size;
++	atomic64_t vram_pin_size;
++	atomic64_t visible_pin_size;
++	atomic64_t gart_pin_size;
+ 
+ 	/* amdkfd interface */
+ 	struct kfd_dev *kfd;
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_
+ 		return;
+ 	}
+ 
+-	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
++	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
+ 	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+ 
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -494,13 +494,13 @@ static int amdgpu_info_ioctl(struct drm_
+ 	case AMDGPU_INFO_VRAM_GTT: {
+ 		struct drm_amdgpu_info_vram_gtt vram_gtt;
+ 
+-		vram_gtt.vram_size = adev->gmc.real_vram_size;
+-		vram_gtt.vram_size -= adev->vram_pin_size;
+-		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+-		vram_gtt.vram_cpu_accessible_size -= adev->visible_pin_size;
++		vram_gtt.vram_size = adev->gmc.real_vram_size -
++			atomic64_read(&adev->vram_pin_size);
++		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
++			atomic64_read(&adev->visible_pin_size);
+ 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ 		vram_gtt.gtt_size *= PAGE_SIZE;
+-		vram_gtt.gtt_size -= adev->gart_pin_size;
++		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
+ 		return copy_to_user(out, &vram_gtt,
+ 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
+ 	}
+@@ -509,16 +509,16 @@ static int amdgpu_info_ioctl(struct drm_
+ 
+ 		memset(&mem, 0, sizeof(mem));
+ 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
+-		mem.vram.usable_heap_size =
+-			adev->gmc.real_vram_size - adev->vram_pin_size;
++		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
++			atomic64_read(&adev->vram_pin_size);
+ 		mem.vram.heap_usage =
+ 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+ 
+ 		mem.cpu_accessible_vram.total_heap_size =
+ 			adev->gmc.visible_vram_size;
+-		mem.cpu_accessible_vram.usable_heap_size =
+-			adev->gmc.visible_vram_size - adev->visible_pin_size;
++		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
++			atomic64_read(&adev->visible_pin_size);
+ 		mem.cpu_accessible_vram.heap_usage =
+ 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 		mem.cpu_accessible_vram.max_allocation =
+@@ -526,8 +526,8 @@ static int amdgpu_info_ioctl(struct drm_
+ 
+ 		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ 		mem.gtt.total_heap_size *= PAGE_SIZE;
+-		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+-			- adev->gart_pin_size;
++		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
++			atomic64_read(&adev->gart_pin_size);
+ 		mem.gtt.heap_usage =
+ 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -761,10 +761,11 @@ int amdgpu_bo_pin_restricted(struct amdg
+ 
+ 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+-		adev->vram_pin_size += amdgpu_bo_size(bo);
+-		adev->visible_pin_size += amdgpu_vram_mgr_bo_visible_size(bo);
++		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
++		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
++			     &adev->visible_pin_size);
+ 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+-		adev->gart_pin_size += amdgpu_bo_size(bo);
++		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ 	}
+ 
+ error:
+@@ -791,10 +792,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo
+ 		return 0;
+ 
+ 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+-		adev->vram_pin_size -= amdgpu_bo_size(bo);
+-		adev->visible_pin_size -= amdgpu_vram_mgr_bo_visible_size(bo);
++		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++			     &adev->visible_pin_size);
+ 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+-		adev->gart_pin_size -= amdgpu_bo_size(bo);
++		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ 	}
+ 
+ 	for (i = 0; i < bo->placement.num_placement; i++) {
diff --git a/queue-4.18/drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch b/queue-4.18/drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch
new file mode 100644
index 00000000000..fe9f1a4b46f
--- /dev/null
+++ b/queue-4.18/drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch
@@ -0,0 +1,80 @@
+From 15e6b76880e65be24250e30986084b5569b7a06f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?=
+Date: Wed, 11 Jul 2018 12:42:55 +0200
+Subject: drm/amdgpu: Warn and update pin_size values when destroying a pinned BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer
+
+commit 15e6b76880e65be24250e30986084b5569b7a06f upstream.
+
+This shouldn't happen, but if it does, we'll get a backtrace of the
+caller, and update the pin_size values as needed.
+
+v2:
+* Check bo->pin_count instead of placement flags (Christian König)
+
+Reviewed-by: Christian König
+Signed-off-by: Michel Dänzer
+Signed-off-by: Alex Deucher
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |   32 ++++++++++++++++++++++-------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -50,11 +50,35 @@ static bool amdgpu_need_backup(struct am
+ 	return true;
+ }
+ 
++/**
++ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
++ *
++ * @bo: &amdgpu_bo buffer object
++ *
++ * This function is called when a BO stops being pinned, and updates the
++ * &amdgpu_device pin_size values accordingly.
++ */
++static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
++{
++	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++
++	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
++		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++			     &adev->visible_pin_size);
++	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
++		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
++	}
++}
++
+ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ 
++	if (WARN_ON_ONCE(bo->pin_count > 0))
++		amdgpu_bo_subtract_pin_size(bo);
++
+ 	if (bo->kfd_bo)
+ 		amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ 
+@@ -791,13 +815,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo
+ 	if (bo->pin_count)
+ 		return 0;
+ 
+-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+-		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+-		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+-			     &adev->visible_pin_size);
+-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+-		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+-	}
++	amdgpu_bo_subtract_pin_size(bo);
+ 
+ 	for (i = 0; i < bo->placement.num_placement; i++) {
+ 		bo->placements[i].lpfn = 0;
diff --git a/queue-4.18/drm-i915-set-dp-main-stream-attribute-for-color-range-on-ddi-platforms.patch b/queue-4.18/drm-i915-set-dp-main-stream-attribute-for-color-range-on-ddi-platforms.patch
new file mode 100644
index 00000000000..2bf1cd1136b
--- /dev/null
+++ b/queue-4.18/drm-i915-set-dp-main-stream-attribute-for-color-range-on-ddi-platforms.patch
@@ -0,0 +1,88 @@
+From 6209c285e7a5e68dbcdf8fd2456c6dd68433806b Mon Sep 17 00:00:00 2001
+From: Jani Nikula
+Date: Tue, 14 Aug 2018 09:00:01 +0300
+Subject: drm/i915: set DP Main Stream Attribute for color range on DDI platforms
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula
+
+commit 6209c285e7a5e68dbcdf8fd2456c6dd68433806b upstream.
+
+Since Haswell we have no color range indication either in the pipe or
+port registers for DP. Instead, there's a separate register for setting
+the DP Main Stream Attributes (MSA) directly. The MSA register
+definition makes no references to colorimetry, just a vague reference to
+the DP spec. The connection to the color range was lost.
+
+Apparently we've failed to set the proper MSA bit for limited, or CEA,
+range ever since the first DDI platforms. We've started setting other
+MSA parameters since commit dae847991a43 ("drm/i915: add
+intel_ddi_set_pipe_settings").
+
+Without the crucial bit of information, the DP sink has no way of
+knowing the source is actually transmitting limited range RGB, leading
+to "washed out" colors. With the colorimetry information, compliant
+sinks should be able to handle the limited range properly. Native
+(i.e. non-LSPCON) HDMI was not affected because we do pass the color
+range via AVI infoframes.
+
+Though not the root cause, the problem was made worse for DDI platforms
+with commit 55bc60db5988 ("drm/i915: Add "Automatic" mode for the
+"Broadcast RGB" property"), which selects limited range RGB
+automatically based on the mode, as per the DP, HDMI and CEA specs.
+
+After all these years, the fix boils down to flipping one bit.
+
+[Per testing reports, this fixes DP sinks, but not the LSPCON. My
+ educated guess is that the LSPCON fails to turn the CEA range MSA into
+ AVI infoframes for HDMI.]
+
+Reported-by: Michał Kopeć
+Reported-by: N. W.
+Reported-by: Nicholas Stommel
+Reported-by: Tom Yan
+Tested-by: Nicholas Stommel
+References: https://bugs.freedesktop.org/show_bug.cgi?id=100023
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107476
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=94921
+Cc: Paulo Zanoni
+Cc: Rodrigo Vivi
+Cc: Ville Syrjälä
+Cc: # v3.9+
+Reviewed-by: Rodrigo Vivi
+Signed-off-by: Jani Nikula
+Link: https://patchwork.freedesktop.org/patch/msgid/20180814060001.18224-1-jani.nikula@intel.com
+(cherry picked from commit dc5977da99ea28094b8fa4e9bacbd29bedc41de5)
+Signed-off-by: Rodrigo Vivi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/i915/i915_reg.h  |    1 +
+ drivers/gpu/drm/i915/intel_ddi.c |    4 ++++
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -8825,6 +8825,7 @@ enum skl_power_gate {
+ #define  TRANS_MSA_10_BPC		(2<<5)
+ #define  TRANS_MSA_12_BPC		(3<<5)
+ #define  TRANS_MSA_16_BPC		(4<<5)
++#define  TRANS_MSA_CEA_RANGE		(1<<3)
+ 
+ /* LCPLL Control */
+ #define LCPLL_CTL			_MMIO(0x130040)
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1659,6 +1659,10 @@ void intel_ddi_set_pipe_settings(const s
+ 	WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ 
+ 	temp = TRANS_MSA_SYNC_CLK;
++
++	if (crtc_state->limited_color_range)
++		temp |= TRANS_MSA_CEA_RANGE;
++
+ 	switch (crtc_state->pipe_bpp) {
+ 	case 18:
+ 		temp |= TRANS_MSA_6_BPC;
diff --git a/queue-4.18/series b/queue-4.18/series
index 8dc8620a52b..c28a35b46e9 100644
--- a/queue-4.18/series
+++ b/queue-4.18/series
@@ -181,3 +181,9 @@ drm-amdgpu-fix-incorrect-use-of-drm_file-pid.patch
 drm-i915-re-apply-perform-link-quality-check-unconditionally-during-long-pulse.patch
 uapi-linux-keyctl.h-don-t-use-c-reserved-keyword-as-a-struct-member-name.patch
 mm-respect-arch_dup_mmap-return-value.patch
+drm-i915-set-dp-main-stream-attribute-for-color-range-on-ddi-platforms.patch
+x86-tsc-prevent-result-truncation-on-32bit.patch
+drm-amdgpu-keep-track-of-amount-of-pinned-cpu-visible-vram.patch
+drm-amdgpu-make-pin_size-values-atomic.patch
+drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch
+drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
diff --git a/queue-4.18/x86-tsc-prevent-result-truncation-on-32bit.patch b/queue-4.18/x86-tsc-prevent-result-truncation-on-32bit.patch
new file mode 100644
index 00000000000..cfbb468df12
--- /dev/null
+++ b/queue-4.18/x86-tsc-prevent-result-truncation-on-32bit.patch
@@ -0,0 +1,64 @@
+From 17f6bac2249356c795339e03a0742cd79be3cab8 Mon Sep 17 00:00:00 2001
+From: Chuanhua Lei
+Date: Thu, 6 Sep 2018 18:03:23 +0800
+Subject: x86/tsc: Prevent result truncation on 32bit
+
+From: Chuanhua Lei
+
+commit 17f6bac2249356c795339e03a0742cd79be3cab8 upstream.
+
+Loops per jiffy is calculated by multiplying tsc_khz with 1e3 and then
+dividing it by HZ.
+
+Both tsc_khz and the temporary variable holding the multiplication result
+are of type unsigned long, so on 32bit the result is truncated to the lower
+32bit.
+
+Use u64 as type for the temporary variable and cast tsc_khz to it before
+multiplying.
+
+[ tglx: Massaged changelog and removed pointless braces ]
+
+[ tglx: Backport to stable. Due to massive code changes is the upstream
+  commit not applicable anymore. The issue has gone unnoticed in
+  kernels pre 4.19 because the bogus LPJ value gets fixed up in a
+  later stage of early boot, but it still might cause subtle and hard
+  to debug issues between these two points. ]
+
+Fixes: cf7a63ef4e02 ("x86/tsc: Calibrate tsc only once")
+Signed-off-by: Chuanhua Lei
+Signed-off-by: Thomas Gleixner
+Cc: yixin.zhu@linux.intel.com
+Cc: "H. Peter Anvin"
+Cc: Peter Zijlstra
+Cc: Len Brown
+Cc: Pavel Tatashin
+Cc: Rajvi Jingar
+Cc: Dou Liyang
+Link: https://lkml.kernel.org/r/1536228203-18701-1-git-send-email-chuanhua.lei@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/tsc.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1343,7 +1343,7 @@ device_initcall(init_tsc_clocksource);
+ 
+ void __init tsc_early_delay_calibrate(void)
+ {
+-	unsigned long lpj;
++	u64 lpj;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_TSC))
+ 		return;
+@@ -1355,7 +1355,7 @@ void __init tsc_early_delay_calibrate(vo
+ 	if (!tsc_khz)
+ 		return;
+ 
+-	lpj = tsc_khz * 1000;
++	lpj = (u64)tsc_khz * 1000;
+ 	do_div(lpj, HZ);
+ 	loops_per_jiffy = lpj;
+ }
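
To make the truncation fixed by the x86/tsc patch above concrete, here is a minimal standalone userspace C sketch (not kernel code; uint32_t stands in for a 32-bit unsigned long, and the tsc_khz and HZ values are made-up examples, chosen so the 32-bit product wraps past 2^32):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint32_t tsc_khz = 4500000;     /* hypothetical 4.5 GHz TSC, in kHz */
        uint32_t hz = 250;              /* stand-in for the kernel's HZ */

        /* Pre-patch arithmetic: the product is computed in 32 bits and
         * wraps modulo 2^32 before the division ever happens. */
        uint32_t lpj_truncated = tsc_khz * 1000;

        /* Post-patch arithmetic: widen to 64 bits before multiplying,
         * exactly what the (u64) cast in the fix accomplishes. */
        uint64_t lpj_correct = (uint64_t)tsc_khz * 1000;

        printf("truncated lpj: %" PRIu32 "\n", lpj_truncated / hz); /* 820130 */
        printf("correct lpj:   %" PRIu64 "\n", lpj_correct / hz);   /* 18000000 */
        return 0;
}

On 64-bit builds unsigned long is already 64 bits wide, which is why only 32-bit kernels computed a bogus loops_per_jiffy.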
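Likewise, a userspace sketch of the lost-update race that motivated the "drm/amdgpu: Make pin_size values atomic" patch above (hypothetical page-sized pin increments and iteration counts; pthreads and C11 atomics stand in for the kernel's atomic64_t API; build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static unsigned long long plain_pin_size;  /* racy, like the old u64 fields */
static atomic_ullong atomic_pin_size;      /* like the new atomic64_t fields */

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < ITERS; i++) {
                /* Non-atomic read-modify-write: two threads can read the
                 * same old value and one increment is silently lost. */
                plain_pin_size += 4096;

                /* Atomic read-modify-write: every increment is preserved. */
                atomic_fetch_add(&atomic_pin_size, 4096);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("plain:  %llu\n", plain_pin_size);
        printf("atomic: %llu\n", (unsigned long long)atomic_load(&atomic_pin_size));
        return 0;
}

The plain counter typically ends up below the expected 2 * ITERS * 4096 because the two read-modify-write sequences interleave, while the atomic counter always matches — the same "completely bogus values" scenario the patch closes for concurrent pin/unpin.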