--- /dev/null
+From e701156ccc6c7a5f104a968dda74cd6434178712 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri, 7 Jul 2023 21:26:09 -0500
+Subject: drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit e701156ccc6c7a5f104a968dda74cd6434178712 upstream.
+
+SMU13 overrides dynamic PCIe lane width and dynamic speed when on
+certain hosts. commit 38e4ced80479 ("drm/amd/pm: conditionally disable
+pcie lane switching for some sienna_cichlid SKUs") worked around this
+issue by setting up certain SKUs to set up certain limits, but the same
+fundamental problem with those hosts affects all SMU11 implementations
+as well, so align the SMU11 and SMU13 driver handling.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 93 +++-------------
+ 1 file changed, 20 insertions(+), 73 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2081,89 +2081,36 @@ static int sienna_cichlid_display_disabl
+ return ret;
+ }
+
+-static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+- uint32_t *gen_speed_override,
+- uint32_t *lane_width_override)
+-{
+- struct amdgpu_device *adev = smu->adev;
+-
+- *gen_speed_override = 0xff;
+- *lane_width_override = 0xff;
+-
+- switch (adev->pdev->device) {
+- case 0x73A0:
+- case 0x73A1:
+- case 0x73A2:
+- case 0x73A3:
+- case 0x73AB:
+- case 0x73AE:
+- /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
+- *lane_width_override = 6;
+- break;
+- case 0x73E0:
+- case 0x73E1:
+- case 0x73E3:
+- *lane_width_override = 4;
+- break;
+- case 0x7420:
+- case 0x7421:
+- case 0x7422:
+- case 0x7423:
+- case 0x7424:
+- *lane_width_override = 3;
+- break;
+- default:
+- break;
+- }
+-}
+-
+-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+-
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+- uint32_t gen_speed_override, lane_width_override;
+- uint8_t *table_member1, *table_member2;
+- uint32_t min_gen_speed, max_gen_speed;
+- uint32_t min_lane_width, max_lane_width;
+- uint32_t smu_pcie_arg;
++ u32 smu_pcie_arg;
+ int ret, i;
+
+- GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+- GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+-
+- sienna_cichlid_get_override_pcie_settings(smu,
+- &gen_speed_override,
+- &lane_width_override);
+-
+- /* PCIE gen speed override */
+- if (gen_speed_override != 0xff) {
+- min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+- max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+- } else {
+- min_gen_speed = MAX(0, table_member1[0]);
+- max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+- min_gen_speed = min_gen_speed > max_gen_speed ?
+- max_gen_speed : min_gen_speed;
+- }
+- pcie_table->pcie_gen[0] = min_gen_speed;
+- pcie_table->pcie_gen[1] = max_gen_speed;
+-
+- /* PCIE lane width override */
+- if (lane_width_override != 0xff) {
+- min_lane_width = MIN(pcie_width_cap, lane_width_override);
+- max_lane_width = MIN(pcie_width_cap, lane_width_override);
++ /* PCIE gen speed and lane width override */
++ if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
++ pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
++
++ if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
++ pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
++
++ /* Force all levels to use the same settings */
++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
++ pcie_table->pcie_gen[i] = pcie_gen_cap;
++ pcie_table->pcie_lane[i] = pcie_width_cap;
++ }
+ } else {
+- min_lane_width = MAX(1, table_member2[0]);
+- max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+- min_lane_width = min_lane_width > max_lane_width ?
+- max_lane_width : min_lane_width;
++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
++ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
++ pcie_table->pcie_gen[i] = pcie_gen_cap;
++ if (pcie_table->pcie_lane[i] > pcie_width_cap)
++ pcie_table->pcie_lane[i] = pcie_width_cap;
++ }
+ }
+- pcie_table->pcie_lane[0] = min_lane_width;
+- pcie_table->pcie_lane[1] = max_lane_width;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16 |
--- /dev/null
+From 2b02d746c1818baf741f4eeeff9b97ab4b81e1cf Mon Sep 17 00:00:00 2001
+From: Agustin Gutierrez <agustin.gutierrez@amd.com>
+Date: Tue, 21 Feb 2023 16:08:15 -0500
+Subject: drm/amd/display: Keep PHY active for dp config
+
+From: Agustin Gutierrez <agustin.gutierrez@amd.com>
+
+commit 2b02d746c1818baf741f4eeeff9b97ab4b81e1cf upstream.
+
+[Why]
+Current hotplug sequence causes temporary hang at the re-entry of the
+optimized power state.
+
+[How]
+Keep a PHY active when detecting DP signal + DPMS active
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Agustin Gutierrez <agustin.gutierrez@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
++
++ /* Checking stream / link detection ensuring that PHY is active*/
++ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
++ display_count++;
++
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
--- /dev/null
+From 188623076d0f1a500583d392b6187056bf7cc71a Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri, 7 Jul 2023 21:26:08 -0500
+Subject: drm/amd: Move helper for dynamic speed switch check out of smu13
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 188623076d0f1a500583d392b6187056bf7cc71a upstream.
+
+This helper is used for checking if the connected host supports
+the feature, it can be moved into generic code to be used by other
+smu implementations as well.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 +++++++++++++++++++
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 21 +--------------------
+ 3 files changed, 21 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1262,6 +1262,7 @@ int amdgpu_device_gpu_recover(struct amd
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
++bool amdgpu_device_pcie_dynamic_switching_supported(void);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+ bool amdgpu_device_aspm_support_quirk(void);
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1333,6 +1333,25 @@ bool amdgpu_device_need_post(struct amdg
+ return true;
+ }
+
++/*
++ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
++ * speed switching. Until we have confirmation from Intel that a specific host
++ * supports it, it's safer that we keep it disabled for all.
++ *
++ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
++ */
++bool amdgpu_device_pcie_dynamic_switching_supported(void)
++{
++#if IS_ENABLED(CONFIG_X86)
++ struct cpuinfo_x86 *c = &cpu_data(0);
++
++ if (c->x86_vendor == X86_VENDOR_INTEL)
++ return false;
++#endif
++ return true;
++}
++
+ /**
+ * amdgpu_device_should_use_aspm - check if the device should program ASPM
+ *
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2490,25 +2490,6 @@ int smu_v13_0_mode1_reset(struct smu_con
+ return ret;
+ }
+
+-/*
+- * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+- * speed switching. Until we have confirmation from Intel that a specific host
+- * supports it, it's safer that we keep it disabled for all.
+- *
+- * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+- */
+-static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
+-{
+-#if IS_ENABLED(CONFIG_X86)
+- struct cpuinfo_x86 *c = &cpu_data(0);
+-
+- if (c->x86_vendor == X86_VENDOR_INTEL)
+- return false;
+-#endif
+- return true;
+-}
+-
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+@@ -2520,7 +2501,7 @@ int smu_v13_0_update_pcie_parameters(str
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+- if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
++ if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
--- /dev/null
+From 9cf42bca30e98a1c6c9e8abf876940a551eaa3d1 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Tue, 2 Aug 2022 11:00:16 +0200
+Subject: efi: libstub: use EFI_LOADER_CODE region when moving the kernel in memory
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 9cf42bca30e98a1c6c9e8abf876940a551eaa3d1 upstream.
+
+The EFI spec is not very clear about which permissions are being given
+when allocating pages of a certain type. However, it is quite obvious
+that EFI_LOADER_CODE is more likely to permit execution than
+EFI_LOADER_DATA, which becomes relevant once we permit booting the
+kernel proper with the firmware's 1:1 mapping still active.
+
+Ostensibly, recent systems such as the Surface Pro X grant executable
+permissions to EFI_LOADER_CODE regions but not EFI_LOADER_DATA regions.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/alignedmem.c | 5 +++--
+ drivers/firmware/efi/libstub/arm64-stub.c | 6 ++++--
+ drivers/firmware/efi/libstub/efistub.h | 6 ++++--
+ drivers/firmware/efi/libstub/mem.c | 3 ++-
+ drivers/firmware/efi/libstub/randomalloc.c | 5 +++--
+ 5 files changed, 16 insertions(+), 9 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/alignedmem.c
++++ b/drivers/firmware/efi/libstub/alignedmem.c
+@@ -22,7 +22,8 @@
+ * Return: status code
+ */
+ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+- unsigned long max, unsigned long align)
++ unsigned long max, unsigned long align,
++ int memory_type)
+ {
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+@@ -36,7 +37,7 @@ efi_status_t efi_allocate_pages_aligned(
+ slack = align / EFI_PAGE_SIZE - 1;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+- EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
++ memory_type, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -180,7 +180,8 @@ efi_status_t handle_kernel_image(unsigne
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+- reserve_addr, phys_seed);
++ reserve_addr, phys_seed,
++ EFI_LOADER_CODE);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+@@ -201,7 +202,8 @@ efi_status_t handle_kernel_image(unsigne
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+- ULONG_MAX, min_kimg_align);
++ ULONG_MAX, min_kimg_align,
++ EFI_LOADER_CODE);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -880,7 +880,8 @@ void efi_get_virtmap(efi_memory_desc_t *
+ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+- unsigned long *addr, unsigned long random_seed);
++ unsigned long *addr, unsigned long random_seed,
++ int memory_type);
+
+ efi_status_t efi_random_get_seed(void);
+
+@@ -907,7 +908,8 @@ efi_status_t efi_allocate_pages(unsigned
+ unsigned long max);
+
+ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+- unsigned long max, unsigned long align);
++ unsigned long max, unsigned long align,
++ int memory_type);
+
+ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+--- a/drivers/firmware/efi/libstub/mem.c
++++ b/drivers/firmware/efi/libstub/mem.c
+@@ -91,7 +91,8 @@ efi_status_t efi_allocate_pages(unsigned
+
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+- EFI_ALLOC_ALIGN);
++ EFI_ALLOC_ALIGN,
++ EFI_LOADER_DATA);
+
+ alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -53,7 +53,8 @@ static unsigned long get_entry_num_slots
+ efi_status_t efi_random_alloc(unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+- unsigned long random_seed)
++ unsigned long random_seed,
++ int memory_type)
+ {
+ unsigned long total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
+@@ -118,7 +119,7 @@ efi_status_t efi_random_alloc(unsigned l
+ pages = size / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+- EFI_LOADER_DATA, pages, &target);
++ memory_type, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
--- /dev/null
+From b389139f12f287b8ed2e2628b72df89a081f0b59 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 26 Jun 2023 00:42:19 +0200
+Subject: netfilter: nf_tables: fix underflow in chain reference counter
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit b389139f12f287b8ed2e2628b72df89a081f0b59 upstream.
+
+Set element addition error path decrements reference counter on chains
+twice: once on element release and again via nft_data_release().
+
+Then, d6b478666ffa ("netfilter: nf_tables: fix underflow in object
+reference counter") incorrectly fixed this by removing the stateful
+object reference count decrement.
+
+Restore the stateful object decrement as in b91d90368837 ("netfilter:
+nf_tables: fix leaking object reference count") and let
+nft_data_release() decrement the chain reference counter, so this is
+done only once.
+
+Fixes: d6b478666ffa ("netfilter: nf_tables: fix underflow in object reference counter")
+Fixes: 628bd3e49cba ("netfilter: nf_tables: drop map element references from preparation phase")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6571,7 +6571,9 @@ err_set_full:
+ err_element_clash:
+ kfree(trans);
+ err_elem_free:
+- nft_set_elem_destroy(set, elem.priv, true);
++ nf_tables_set_elem_destroy(ctx, set, elem.priv);
++ if (obj)
++ obj->use--;
+ err_parse_data:
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ nft_data_release(&elem.data.val, desc.type);
--- /dev/null
+From d6b478666ffa6d2c25386d78bf1c4640d4da305e Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 16 Jun 2023 15:20:08 +0200
+Subject: netfilter: nf_tables: fix underflow in object reference counter
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit d6b478666ffa6d2c25386d78bf1c4640d4da305e upstream.
+
+Since ("netfilter: nf_tables: drop map element references from
+preparation phase"), integration with commit protocol is better,
+therefore drop the workaround that b91d90368837 ("netfilter: nf_tables:
+fix leaking object reference count") provides.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6504,19 +6504,19 @@ static int nft_add_set_elem(struct nft_c
+ if (flags)
+ *nft_set_ext_flags(ext) = flags;
+
++ if (obj) {
++ *nft_set_ext_obj(ext) = obj;
++ obj->use++;
++ }
+ if (ulen > 0) {
+ if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
+ err = -EINVAL;
+- goto err_elem_userdata;
++ goto err_elem_free;
+ }
+ udata = nft_set_ext_userdata(ext);
+ udata->len = ulen - 1;
+ nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
+ }
+- if (obj) {
+- *nft_set_ext_obj(ext) = obj;
+- obj->use++;
+- }
+ err = nft_set_elem_expr_setup(ctx, &tmpl, ext, expr_array, num_exprs);
+ if (err < 0)
+ goto err_elem_free;
+@@ -6571,9 +6571,6 @@ err_set_full:
+ err_element_clash:
+ kfree(trans);
+ err_elem_free:
+- if (obj)
+- obj->use--;
+-err_elem_userdata:
+ nft_set_elem_destroy(set, elem.priv, true);
+ err_parse_data:
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
--- /dev/null
+From 1a73f5b8f079fd42a544c1600beface50c63af7c Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Tue, 16 May 2023 22:16:18 +0800
+Subject: ovl: fix null pointer dereference in ovl_permission()
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit 1a73f5b8f079fd42a544c1600beface50c63af7c upstream.
+
+Following process:
+ P1 P2
+ path_lookupat
+ link_path_walk
+ inode_permission
+ ovl_permission
+ ovl_i_path_real(inode, &realpath)
+ path->dentry = ovl_i_dentry_upper(inode)
+ drop_cache
+ __dentry_kill(ovl_dentry)
+ iput(ovl_inode)
+ ovl_destroy_inode(ovl_inode)
+ dput(oi->__upperdentry)
+ dentry_kill(upperdentry)
+ dentry_unlink_inode
+ upperdentry->d_inode = NULL
+ realinode = d_inode(realpath.dentry) // return NULL
+ inode_permission(realinode)
+ inode->i_sb // NULL pointer dereference
+, which will trigger a null pointer dereference at realinode:
+ [ 335.664979] BUG: kernel NULL pointer dereference,
+ address: 0000000000000002
+ [ 335.668032] CPU: 0 PID: 2592 Comm: ls Not tainted 6.3.0
+ [ 335.669956] RIP: 0010:inode_permission+0x33/0x2c0
+ [ 335.678939] Call Trace:
+ [ 335.679165] <TASK>
+ [ 335.679371] ovl_permission+0xde/0x320
+ [ 335.679723] inode_permission+0x15e/0x2c0
+ [ 335.680090] link_path_walk+0x115/0x550
+ [ 335.680771] path_lookupat.isra.0+0xb2/0x200
+ [ 335.681170] filename_lookup+0xda/0x240
+ [ 335.681922] vfs_statx+0xa6/0x1f0
+ [ 335.682233] vfs_fstatat+0x7b/0xb0
+
+Fetch a reproducer in [Link].
+
+Use the helper ovl_i_path_realinode() to get realinode and then do
+non-nullptr checking.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217405
+Fixes: 4b7791b2e958 ("ovl: handle idmappings in ovl_permission()")
+Cc: <stable@vger.kernel.org> # v5.19
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Suggested-by: Christian Brauner <brauner@kernel.org>
+Suggested-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/inode.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -286,8 +286,8 @@ int ovl_permission(struct user_namespace
+ int err;
+
+ /* Careful in RCU walk mode */
+- ovl_i_path_real(inode, &realpath);
+- if (!realpath.dentry) {
++ realinode = ovl_i_path_real(inode, &realpath);
++ if (!realinode) {
+ WARN_ON(!(mask & MAY_NOT_BLOCK));
+ return -ECHILD;
+ }
+@@ -300,7 +300,6 @@ int ovl_permission(struct user_namespace
+ if (err)
+ return err;
+
+- realinode = d_inode(realpath.dentry);
+ old_cred = ovl_override_creds(inode->i_sb);
+ if (!upperinode &&
+ !special_file(realinode->i_mode) && mask & MAY_WRITE) {
--- /dev/null
+From 33c9ab5b493a0e922b06c12fed4fdcb862212cda Mon Sep 17 00:00:00 2001
+From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Fri, 14 Jul 2023 20:14:35 +0530
+Subject: platform/x86/amd/pmf: Notify OS power slider update
+
+From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 33c9ab5b493a0e922b06c12fed4fdcb862212cda upstream.
+
+APMF fn8 can notify EC about the OS slider position change. Add this
+capability to the PMF driver so that it can call the APMF fn8 based on
+the changes in the Platform profile events.
+
+Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Patil Rajesh Reddy <Patil.Reddy@amd.com>
+Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Link: https://lore.kernel.org/r/20230714144435.1239776-2-Shyam-sundar.S-k@amd.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/amd/pmf/acpi.c | 21 ++++++++++
+ drivers/platform/x86/amd/pmf/core.c | 9 +++-
+ drivers/platform/x86/amd/pmf/pmf.h | 16 +++++++
+ drivers/platform/x86/amd/pmf/sps.c | 74 ++++++++++++++++++++++++++++++++++--
+ 4 files changed, 114 insertions(+), 6 deletions(-)
+
+--- a/drivers/platform/x86/amd/pmf/acpi.c
++++ b/drivers/platform/x86/amd/pmf/acpi.c
+@@ -106,6 +106,27 @@ int apmf_get_static_slider_granular(stru
+ data, sizeof(*data));
+ }
+
++int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
++{
++ struct os_power_slider args;
++ struct acpi_buffer params;
++ union acpi_object *info;
++ int err = 0;
++
++ args.size = sizeof(args);
++ args.slider_event = event;
++
++ params.length = sizeof(args);
++ params.pointer = (void *)&args;
++
++ info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, ¶ms);
++ if (!info)
++ err = -EIO;
++
++ kfree(info);
++ return err;
++}
++
+ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+ {
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -71,7 +71,11 @@ static int amd_pmf_pwr_src_notify_call(s
+ return NOTIFY_DONE;
+ }
+
+- amd_pmf_set_sps_power_limits(pmf);
++ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
++ amd_pmf_set_sps_power_limits(pmf);
++
++ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
++ amd_pmf_power_slider_update_event(pmf);
+
+ return NOTIFY_OK;
+ }
+@@ -295,7 +299,8 @@ static void amd_pmf_init_features(struct
+ int ret;
+
+ /* Enable Static Slider */
+- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
++ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ amd_pmf_init_sps(dev);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+--- a/drivers/platform/x86/amd/pmf/pmf.h
++++ b/drivers/platform/x86/amd/pmf/pmf.h
+@@ -21,6 +21,7 @@
+ #define APMF_FUNC_SBIOS_HEARTBEAT 4
+ #define APMF_FUNC_AUTO_MODE 5
+ #define APMF_FUNC_SET_FAN_IDX 7
++#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8
+ #define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
+ #define APMF_FUNC_DYN_SLIDER_AC 11
+ #define APMF_FUNC_DYN_SLIDER_DC 12
+@@ -44,6 +45,14 @@
+ #define GET_STT_LIMIT_APU 0x20
+ #define GET_STT_LIMIT_HS2 0x21
+
++/* OS slider update notification */
++#define DC_BEST_PERF 0
++#define DC_BETTER_PERF 1
++#define DC_BATTERY_SAVER 3
++#define AC_BEST_PERF 4
++#define AC_BETTER_PERF 5
++#define AC_BETTER_BATTERY 6
++
+ /* Fan Index for Auto Mode */
+ #define FAN_INDEX_AUTO 0xFFFFFFFF
+
+@@ -193,6 +202,11 @@ struct amd_pmf_static_slider_granular {
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+ };
+
++struct os_power_slider {
++ u16 size;
++ u8 slider_event;
++} __packed;
++
+ struct fan_table_control {
+ bool manual;
+ unsigned long fan_id;
+@@ -383,6 +397,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev
+ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+ int amd_pmf_get_power_source(void);
+ int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
++int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+
+ /* SPS Layer */
+ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+@@ -393,6 +408,7 @@ void amd_pmf_deinit_sps(struct amd_pmf_d
+ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *output);
+ bool is_pprof_balanced(struct amd_pmf_dev *pmf);
++int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+
+
+ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+--- a/drivers/platform/x86/amd/pmf/sps.c
++++ b/drivers/platform/x86/amd/pmf/sps.c
+@@ -119,14 +119,77 @@ int amd_pmf_get_pprof_modes(struct amd_p
+ return mode;
+ }
+
++int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
++{
++ u8 mode, flag = 0;
++ int src;
++
++ mode = amd_pmf_get_pprof_modes(dev);
++ if (mode < 0)
++ return mode;
++
++ src = amd_pmf_get_power_source();
++
++ if (src == POWER_SOURCE_AC) {
++ switch (mode) {
++ case POWER_MODE_PERFORMANCE:
++ flag |= BIT(AC_BEST_PERF);
++ break;
++ case POWER_MODE_BALANCED_POWER:
++ flag |= BIT(AC_BETTER_PERF);
++ break;
++ case POWER_MODE_POWER_SAVER:
++ flag |= BIT(AC_BETTER_BATTERY);
++ break;
++ default:
++ dev_err(dev->dev, "unsupported platform profile\n");
++ return -EOPNOTSUPP;
++ }
++
++ } else if (src == POWER_SOURCE_DC) {
++ switch (mode) {
++ case POWER_MODE_PERFORMANCE:
++ flag |= BIT(DC_BEST_PERF);
++ break;
++ case POWER_MODE_BALANCED_POWER:
++ flag |= BIT(DC_BETTER_PERF);
++ break;
++ case POWER_MODE_POWER_SAVER:
++ flag |= BIT(DC_BATTERY_SAVER);
++ break;
++ default:
++ dev_err(dev->dev, "unsupported platform profile\n");
++ return -EOPNOTSUPP;
++ }
++ }
++
++ apmf_os_power_slider_update(dev, flag);
++
++ return 0;
++}
++
+ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+ {
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
++ int ret = 0;
+
+ pmf->current_profile = profile;
+
+- return amd_pmf_set_sps_power_limits(pmf);
++ /* Notify EC about the slider position change */
++ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
++ ret = amd_pmf_power_slider_update_event(pmf);
++ if (ret)
++ return ret;
++ }
++
++ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++ ret = amd_pmf_set_sps_power_limits(pmf);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
+ }
+
+ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+@@ -134,10 +197,13 @@ int amd_pmf_init_sps(struct amd_pmf_dev
+ int err;
+
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+- amd_pmf_load_defaults_sps(dev);
+
+- /* update SPS balanced power mode thermals */
+- amd_pmf_set_sps_power_limits(dev);
++ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++ amd_pmf_load_defaults_sps(dev);
++
++ /* update SPS balanced power mode thermals */
++ amd_pmf_set_sps_power_limits(dev);
++ }
+
+ dev->pprof.profile_get = amd_pmf_profile_get;
+ dev->pprof.profile_set = amd_pmf_profile_set;
--- /dev/null
+From 839e90e75e695b3d9ee17f5a2811e7ee5aea8d4a Mon Sep 17 00:00:00 2001
+From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Fri, 14 Jul 2023 20:14:34 +0530
+Subject: platform/x86/amd/pmf: reduce verbosity of apmf_get_system_params
+
+From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 839e90e75e695b3d9ee17f5a2811e7ee5aea8d4a upstream.
+
+apmf_get_system_params() failure is not a critical event, reduce its
+verbosity from dev_err to dev_dbg.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Link: https://lore.kernel.org/r/20230714144435.1239776-1-Shyam-sundar.S-k@amd.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/amd/pmf/acpi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/amd/pmf/acpi.c
++++ b/drivers/platform/x86/amd/pmf/acpi.c
+@@ -310,7 +310,7 @@ int apmf_acpi_init(struct amd_pmf_dev *p
+
+ ret = apmf_get_system_params(pmf_dev);
+ if (ret) {
+- dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
++ dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ goto out;
+ }
+
--- /dev/null
+netfilter-nf_tables-fix-underflow-in-object-reference-counter.patch
+netfilter-nf_tables-fix-underflow-in-chain-reference-counter.patch
+platform-x86-amd-pmf-notify-os-power-slider-update.patch
+platform-x86-amd-pmf-reduce-verbosity-of-apmf_get_system_params.patch
+drm-amd-display-keep-phy-active-for-dp-config.patch
+ovl-fix-null-pointer-dereference-in-ovl_permission.patch
+drm-amd-move-helper-for-dynamic-speed-switch-check-out-of-smu13.patch
+drm-amd-align-smu11-smu_msg_overridepcieparameters-implementation-with-smu13.patch
+efi-libstub-use-efi_loader_code-region-when-moving-the-kernel-in-memory.patch