--- /dev/null
+From 38e4ced804796c5725e2a52ec3601951552c4a97 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 6 Apr 2023 12:08:21 +0800
+Subject: drm/amd/pm: conditionally disable pcie lane switching for some sienna_cichlid SKUs
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit 38e4ced804796c5725e2a52ec3601951552c4a97 upstream.
+
+Disable PCIe lane switching for some sienna_cichlid SKUs, since it
+might not work well on some platforms.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 92 ++++++++++++----
+ 1 file changed, 74 insertions(+), 18 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -1928,33 +1928,94 @@ static int sienna_cichlid_get_power_limi
+ return 0;
+ }
+
++static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
++ uint32_t *gen_speed_override,
++ uint32_t *lane_width_override)
++{
++ struct amdgpu_device *adev = smu->adev;
++
++ *gen_speed_override = 0xff;
++ *lane_width_override = 0xff;
++
++ switch (adev->pdev->device) {
++ case 0x73A0:
++ case 0x73A1:
++ case 0x73A2:
++ case 0x73A3:
++ case 0x73AB:
++ case 0x73AE:
++ /* Bits 7:0: PCIE lane width; values 1 to 7 correspond to x1 to x32 */
++ *lane_width_override = 6;
++ break;
++ case 0x73E0:
++ case 0x73E1:
++ case 0x73E3:
++ *lane_width_override = 4;
++ break;
++ case 0x7420:
++ case 0x7421:
++ case 0x7422:
++ case 0x7423:
++ case 0x7424:
++ *lane_width_override = 3;
++ break;
++ default:
++ break;
++ }
++}
++
++#define MAX(a, b) ((a) > (b) ? (a) : (b))
++
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+-
+- uint32_t smu_pcie_arg;
++ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
++ uint32_t gen_speed_override, lane_width_override;
+ uint8_t *table_member1, *table_member2;
++ uint32_t min_gen_speed, max_gen_speed;
++ uint32_t min_lane_width, max_lane_width;
++ uint32_t smu_pcie_arg;
+ int ret, i;
+
+ GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+ GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+
+- /* lclk dpm table setup */
+- for (i = 0; i < MAX_PCIE_CONF; i++) {
+- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
+- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
++ sienna_cichlid_get_override_pcie_settings(smu,
++ &gen_speed_override,
++ &lane_width_override);
++
++ /* PCIE gen speed override */
++ if (gen_speed_override != 0xff) {
++ min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
++ max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
++ } else {
++ min_gen_speed = MAX(0, table_member1[0]);
++ max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
++ min_gen_speed = min_gen_speed > max_gen_speed ?
++ max_gen_speed : min_gen_speed;
+ }
++ pcie_table->pcie_gen[0] = min_gen_speed;
++ pcie_table->pcie_gen[1] = max_gen_speed;
++
++ /* PCIE lane width override */
++ if (lane_width_override != 0xff) {
++ min_lane_width = MIN(pcie_width_cap, lane_width_override);
++ max_lane_width = MIN(pcie_width_cap, lane_width_override);
++ } else {
++ min_lane_width = MAX(1, table_member2[0]);
++ max_lane_width = MIN(pcie_width_cap, table_member2[1]);
++ min_lane_width = min_lane_width > max_lane_width ?
++ max_lane_width : min_lane_width;
++ }
++ pcie_table->pcie_lane[0] = min_lane_width;
++ pcie_table->pcie_lane[1] = max_lane_width;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+- smu_pcie_arg = (i << 16) |
+- ((table_member1[i] <= pcie_gen_cap) ?
+- (table_member1[i] << 8) :
+- (pcie_gen_cap << 8)) |
+- ((table_member2[i] <= pcie_width_cap) ?
+- table_member2[i] :
+- pcie_width_cap);
++ smu_pcie_arg = (i << 16 |
++ pcie_table->pcie_gen[i] << 8 |
++ pcie_table->pcie_lane[i]);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+@@ -1962,11 +2023,6 @@ static int sienna_cichlid_update_pcie_pa
+ NULL);
+ if (ret)
+ return ret;
+-
+- if (table_member1[i] > pcie_gen_cap)
+- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+- if (table_member2[i] > pcie_width_cap)
+- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
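
For readers tracing the override logic above: the new code collapses the DPM
range to a single level when a per-SKU override exists, and otherwise clamps
the pptable range against the platform cap. A minimal stand-alone sketch of
that clamping scheme for the lane-width path follows; the 0xff "no override"
sentinel mirrors the patch, but the helper name clamp_pcie_range() and its
parameters are illustrative only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static void clamp_pcie_range(uint32_t cap, uint32_t override,
			     uint32_t tbl_min, uint32_t tbl_max,
			     uint32_t *out_min, uint32_t *out_max)
{
	if (override != 0xff) {
		/* A per-SKU override pins both ends to MIN(cap, override). */
		*out_min = MIN(cap, override);
		*out_max = MIN(cap, override);
	} else {
		/* Otherwise honor the pptable range, bounded by the cap. */
		*out_min = MAX(1, tbl_min);
		*out_max = MIN(cap, tbl_max);
		if (*out_min > *out_max)
			*out_min = *out_max;
	}
}

int main(void)
{
	uint32_t lo, hi;

	/* e.g. platform cap 6, SKU override 4 (encoded widths, as in the pptable) */
	clamp_pcie_range(6, 4, 1, 6, &lo, &hi);
	printf("lane range: [%u, %u]\n", (unsigned)lo, (unsigned)hi); /* [4, 4] */
	return 0;
}

With an override present the range degenerates to a single link level, which
is how the patch effectively disables lane switching on the affected SKUs.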
--- /dev/null
+From b447b079cf3a9971ea4d31301e673f49612ccc18 Mon Sep 17 00:00:00 2001
+From: Chia-I Wu <olvaffe@gmail.com>
+Date: Thu, 1 Jun 2023 14:48:08 -0700
+Subject: drm/amdgpu: fix xclk freq on CHIP_STONEY
+
+From: Chia-I Wu <olvaffe@gmail.com>
+
+commit b447b079cf3a9971ea4d31301e673f49612ccc18 upstream.
+
+According to Alex, most APUs from that time seem to have the same issue
+(vbios says 48 MHz, actual is 100 MHz). I only have a CHIP_STONEY, so I
+limit the fixup to CHIP_STONEY.
+
+Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_dev
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 tmp;
+
+- if (adev->flags & AMD_IS_APU)
+- return reference_clock;
++ if (adev->flags & AMD_IS_APU) {
++ switch (adev->asic_type) {
++ case CHIP_STONEY:
++ /* vbios says 48 MHz, but the actual freq is 100 MHz */
++ return 10000;
++ default:
++ return reference_clock;
++ }
++ }
+
+ tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
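
A note on units, since the bare "return 10000" above is easy to misread: the
xclk bookkeeping here is in 10 kHz steps (the reference_freq value it replaces
uses the same units), so 10000 is 100 MHz. A tiny stand-alone sketch of the
conversion; the helper name is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* xclk values are kept in 10 kHz units; convert to Hz for display. */
static uint64_t xclk_10khz_to_hz(uint32_t xclk_10khz)
{
	return (uint64_t)xclk_10khz * 10000;
}

int main(void)
{
	/* The value hard-coded for CHIP_STONEY in the patch. */
	printf("%llu Hz\n",
	       (unsigned long long)xclk_10khz_to_hz(10000)); /* 100000000 Hz */
	return 0;
}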
--- /dev/null
+From 40023959dbab3c6ad56fa7213770e63d197b69fb Mon Sep 17 00:00:00 2001
+From: Andi Shyti <andi.shyti@linux.intel.com>
+Date: Fri, 26 May 2023 14:41:38 +0200
+Subject: drm/i915/gt: Use the correct error value when kernel_context() fails
+
+From: Andi Shyti <andi.shyti@linux.intel.com>
+
+commit 40023959dbab3c6ad56fa7213770e63d197b69fb upstream.
+
+kernel_context() returns an error pointer. Use the error-pointer
+helpers IS_ERR()/PTR_ERR() to evaluate its return value, rather than
+checking for a NULL return.
+
+Fixes: eb5c10cbbc2f ("drm/i915: Remove I915_USER_PRIORITY_SHIFT")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.13+
+Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
+Acked-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230526124138.2006110-1-andi.shyti@linux.intel.com
+(cherry picked from commit edad9ee94f17adc75d3b13ab51bbe3d615ce1e7e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/selftest_execlists.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
++++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
+@@ -1531,8 +1531,8 @@ static int live_busywait_preempt(void *a
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+- int err = -ENOMEM;
+ u32 *map;
++ int err;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+@@ -1540,13 +1540,17 @@ static int live_busywait_preempt(void *a
+ */
+
+ ctx_hi = kernel_context(gt->i915, NULL);
+- if (!ctx_hi)
+- return -ENOMEM;
++ if (IS_ERR(ctx_hi))
++ return PTR_ERR(ctx_hi);
++
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(gt->i915, NULL);
+- if (!ctx_lo)
++ if (IS_ERR(ctx_lo)) {
++ err = PTR_ERR(ctx_lo);
+ goto err_ctx_hi;
++ }
++
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
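
The convention the fix relies on is worth spelling out: kernel_context()
reports failure with an ERR_PTR()-encoded pointer and never returns NULL, so
an "if (!ctx)" check lets error pointers escape into later dereferences. A
minimal self-contained sketch of that error-pointer pattern follows; the
ERR_PTR()/IS_ERR()/PTR_ERR() helpers are re-implemented here only so the
example builds outside the kernel (in-tree code uses <linux/err.h>), and
fake_kernel_context() is an illustrative stand-in.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for kernel_context(): fails with ERR_PTR(), never with NULL. */
static void *fake_kernel_context(int fail)
{
	static int ctx;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&ctx;
}

int main(void)
{
	void *ctx = fake_kernel_context(1);

	if (IS_ERR(ctx))	/* "if (!ctx)" would miss this failure */
		printf("err = %ld\n", PTR_ERR(ctx));	/* -12, i.e. -ENOMEM */
	return 0;
}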