5.15-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 27 Aug 2023 07:02:34 +0000 (09:02 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 27 Aug 2023 07:02:34 +0000 (09:02 +0200)
added patches:
drm-display-dp-fix-the-dp-dsc-receiver-cap-size.patch
drm-vmwgfx-fix-shader-stage-validation.patch
x86-fpu-invalidate-fpu-state-correctly-on-exec.patch
x86-fpu-set-x86_feature_osxsave-feature-after-enabling-osxsave-in-cr4.patch

queue-5.15/drm-display-dp-fix-the-dp-dsc-receiver-cap-size.patch [new file with mode: 0644]
queue-5.15/drm-vmwgfx-fix-shader-stage-validation.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/x86-fpu-invalidate-fpu-state-correctly-on-exec.patch [new file with mode: 0644]
queue-5.15/x86-fpu-set-x86_feature_osxsave-feature-after-enabling-osxsave-in-cr4.patch [new file with mode: 0644]

diff --git a/queue-5.15/drm-display-dp-fix-the-dp-dsc-receiver-cap-size.patch b/queue-5.15/drm-display-dp-fix-the-dp-dsc-receiver-cap-size.patch
new file mode 100644 (file)
index 0000000..d2fcb9a
--- /dev/null
@@ -0,0 +1,37 @@
+From 5ad1ab30ac0809d2963ddcf39ac34317a24a2f17 Mon Sep 17 00:00:00 2001
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Date: Fri, 18 Aug 2023 10:14:36 +0530
+Subject: drm/display/dp: Fix the DP DSC Receiver cap size
+
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+
+commit 5ad1ab30ac0809d2963ddcf39ac34317a24a2f17 upstream.
+
+DP DSC Receiver Capabilities are exposed via DPCD 60h-6Fh.
+Fix the DSC RECEIVER CAP SIZE accordingly.
+
+Fixes: ffddc4363c28 ("drm/dp: Add DP DSC DPCD receiver capability size define and missing SHIFT")
+Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
+Cc: Manasi Navare <manasi.d.navare@intel.com>
+Cc: <stable@vger.kernel.org> # v5.0+
+
+Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230818044436.177806-1-ankit.k.nautiyal@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/drm/drm_dp_helper.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -1495,7 +1495,7 @@ u8 drm_dp_get_adjust_request_post_cursor
+ #define DP_BRANCH_OUI_HEADER_SIZE     0xc
+ #define DP_RECEIVER_CAP_SIZE          0xf
+-#define DP_DSC_RECEIVER_CAP_SIZE        0xf
++#define DP_DSC_RECEIVER_CAP_SIZE        0x10 /* DSC Capabilities 0x60 through 0x6F */
+ #define EDP_PSR_RECEIVER_CAP_SIZE     2
+ #define EDP_DISPLAY_CTL_CAP_SIZE      3
+ #define DP_LTTPR_COMMON_CAP_SIZE      8
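
The size bump in the hunk above follows directly from the DPCD range quoted in the commit message: 0x60 through 0x6F inclusive is 16 registers. A minimal user-space sketch of the arithmetic (illustrative only, not part of the patch; the DSC_CAPS_* names are made up here):

/* Illustrative only, not part of the patch: the DSC receiver capability
 * block spans DPCD addresses 0x60 through 0x6F inclusive. */
#include <assert.h>
#include <stdio.h>

#define DSC_CAPS_FIRST 0x60
#define DSC_CAPS_LAST  0x6f

int main(void)
{
	int size = DSC_CAPS_LAST - DSC_CAPS_FIRST + 1;	/* inclusive range */

	assert(size == 0x10);		/* 16 bytes, one more than the old 0xf */
	printf("DP_DSC_RECEIVER_CAP_SIZE = %#x\n", size);
	return 0;
}
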
diff --git a/queue-5.15/drm-vmwgfx-fix-shader-stage-validation.patch b/queue-5.15/drm-vmwgfx-fix-shader-stage-validation.patch
new file mode 100644 (file)
index 0000000..94216db
--- /dev/null
@@ -0,0 +1,175 @@
+From 14abdfae508228a7307f7491b5c4215ae70c6542 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zackr@vmware.com>
+Date: Fri, 16 Jun 2023 15:09:34 -0400
+Subject: drm/vmwgfx: Fix shader stage validation
+
+From: Zack Rusin <zackr@vmware.com>
+
+commit 14abdfae508228a7307f7491b5c4215ae70c6542 upstream.
+
+For multiple commands the driver was not correctly validating the shader
+stages, resulting in possible kernel oopses. The validation code was only,
+if ever, checking the upper bound on the shader stages but never a lower
+bound (valid shader stages start at 1 not 0).
+
+Fixes kernel oopses ending up in vmw_binding_add, e.g.:
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 1 PID: 2443 Comm: testcase Not tainted 6.3.0-rc4-vmwgfx #1
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:vmw_binding_add+0x4c/0x140 [vmwgfx]
+Code: 7e 30 49 83 ff 0e 0f 87 ea 00 00 00 4b 8d 04 7f 89 d2 89 cb 48 c1 e0 03 4c 8b b0 40 3d 93 c0 48 8b 80 48 3d 93 c0 49 0f af de <48> 03 1c d0 4c 01 e3 49 8>
+RSP: 0018:ffffb8014416b968 EFLAGS: 00010206
+RAX: ffffffffc0933ec0 RBX: 0000000000000000 RCX: 0000000000000000
+RDX: 00000000ffffffff RSI: ffffb8014416b9c0 RDI: ffffb8014316f000
+RBP: ffffb8014416b998 R08: 0000000000000003 R09: 746f6c735f726564
+R10: ffffffffaaf2bda0 R11: 732e676e69646e69 R12: ffffb8014316f000
+R13: ffffb8014416b9c0 R14: 0000000000000040 R15: 0000000000000006
+FS:  00007fba8c0af740(0000) GS:ffff8a1277c80000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000007c0933eb8 CR3: 0000000118244001 CR4: 00000000003706e0
+Call Trace:
+ <TASK>
+ vmw_view_bindings_add+0xf5/0x1b0 [vmwgfx]
+ ? ___drm_dbg+0x8a/0xb0 [drm]
+ vmw_cmd_dx_set_shader_res+0x8f/0xc0 [vmwgfx]
+ vmw_execbuf_process+0x590/0x1360 [vmwgfx]
+ vmw_execbuf_ioctl+0x173/0x370 [vmwgfx]
+ ? __drm_dev_dbg+0xb4/0xe0 [drm]
+ ? __pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx]
+ drm_ioctl_kernel+0xbc/0x160 [drm]
+ drm_ioctl+0x2d2/0x580 [drm]
+ ? __pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx]
+ ? do_fault+0x1a6/0x420
+ vmw_generic_ioctl+0xbd/0x180 [vmwgfx]
+ vmw_unlocked_ioctl+0x19/0x20 [vmwgfx]
+ __x64_sys_ioctl+0x96/0xd0
+ do_syscall_64+0x5d/0x90
+ ? handle_mm_fault+0xe4/0x2f0
+ ? debug_smp_processor_id+0x1b/0x30
+ ? fpregs_assert_state_consistent+0x2e/0x50
+ ? exit_to_user_mode_prepare+0x40/0x180
+ ? irqentry_exit_to_user_mode+0xd/0x20
+ ? irqentry_exit+0x3f/0x50
+ ? exc_page_fault+0x8b/0x180
+ entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+Signed-off-by: Zack Rusin <zackr@vmware.com>
+Cc: security@openanolis.org
+Reported-by: Ziming Zhang <ezrakiez@gmail.com>
+Testcase-found-by: Niels De Graef <ndegraef@redhat.com>
+Fixes: d80efd5cb3de ("drm/vmwgfx: Initial DX support")
+Cc: <stable@vger.kernel.org> # v4.3+
+Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
+Reviewed-by: Martin Krastev <krastevm@vmware.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230616190934.54828-1-zack@kde.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |   12 ++++++++++++
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   29 +++++++++++------------------
+ 2 files changed, 23 insertions(+), 18 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1685,4 +1685,16 @@ static inline bool vmw_has_fences(struct
+       return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+ }
++static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
++                                         u32 shader_type)
++{
++      SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
++
++      if (shader_model >= VMW_SM_5)
++              max_allowed = SVGA3D_SHADERTYPE_MAX;
++      else if (shader_model >= VMW_SM_4)
++              max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
++      return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
++}
++
+ #endif
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2003,7 +2003,7 @@ static int vmw_cmd_set_shader(struct vmw
+       cmd = container_of(header, typeof(*cmd), header);
+-      if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
++      if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
+               VMW_DEBUG_USER("Illegal shader type %u.\n",
+                              (unsigned int) cmd->body.type);
+               return -EINVAL;
+@@ -2125,8 +2125,6 @@ vmw_cmd_dx_set_single_constant_buffer(st
+                                     SVGA3dCmdHeader *header)
+ {
+       VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+-      SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
+-              SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
+       struct vmw_resource *res = NULL;
+       struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+@@ -2143,6 +2141,14 @@ vmw_cmd_dx_set_single_constant_buffer(st
+       if (unlikely(ret != 0))
+               return ret;
++      if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
++          cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
++              VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
++                             (unsigned int) cmd->body.type,
++                             (unsigned int) cmd->body.slot);
++              return -EINVAL;
++      }
++
+       binding.bi.ctx = ctx_node->ctx;
+       binding.bi.res = res;
+       binding.bi.bt = vmw_ctx_binding_cb;
+@@ -2151,14 +2157,6 @@ vmw_cmd_dx_set_single_constant_buffer(st
+       binding.size = cmd->body.sizeInBytes;
+       binding.slot = cmd->body.slot;
+-      if (binding.shader_slot >= max_shader_num ||
+-          binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+-              VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+-                             (unsigned int) cmd->body.type,
+-                             (unsigned int) binding.slot);
+-              return -EINVAL;
+-      }
+-
+       vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+                       binding.slot);
+@@ -2179,15 +2177,13 @@ static int vmw_cmd_dx_set_shader_res(str
+ {
+       VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+               container_of(header, typeof(*cmd), header);
+-      SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+-              SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+       u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+               sizeof(SVGA3dShaderResourceViewId);
+       if ((u64) cmd->body.startView + (u64) num_sr_view >
+           (u64) SVGA3D_DX_MAX_SRVIEWS ||
+-          cmd->body.type >= max_allowed) {
++          !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
+               VMW_DEBUG_USER("Invalid shader binding.\n");
+               return -EINVAL;
+       }
+@@ -2211,8 +2207,6 @@ static int vmw_cmd_dx_set_shader(struct
+                                SVGA3dCmdHeader *header)
+ {
+       VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+-      SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+-              SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+       struct vmw_resource *res = NULL;
+       struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+       struct vmw_ctx_bindinfo_shader binding;
+@@ -2223,8 +2217,7 @@ static int vmw_cmd_dx_set_shader(struct
+       cmd = container_of(header, typeof(*cmd), header);
+-      if (cmd->body.type >= max_allowed ||
+-          cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
++      if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
+               VMW_DEBUG_USER("Illegal shader type %u.\n",
+                              (unsigned int) cmd->body.type);
+               return -EINVAL;
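
The vmw_shadertype_is_valid() helper added in the hunks above clamps both ends of the range. A standalone sketch of the same pattern, using made-up constants rather than the real SVGA3D_* and VMW_SM_* values from the driver headers, shows why the lower bound matters: a shader type of 0 used to pass the old upper-bound-only check.

/* Standalone sketch with made-up constants (the real bounds live in the
 * SVGA3D headers): validity is the half-open range [MIN, max_allowed). */
#include <stdbool.h>
#include <stdio.h>

enum { SHADERTYPE_MIN = 1, SHADERTYPE_PREDX_MAX = 4,
       SHADERTYPE_DX10_MAX = 6, SHADERTYPE_MAX = 9 };
enum sm_type { SM_LEGACY, SM_4, SM_4_1, SM_5 };

static bool shadertype_is_valid(enum sm_type shader_model, unsigned int shader_type)
{
	unsigned int max_allowed = SHADERTYPE_PREDX_MAX;

	if (shader_model >= SM_5)
		max_allowed = SHADERTYPE_MAX;
	else if (shader_model >= SM_4)
		max_allowed = SHADERTYPE_DX10_MAX;

	return shader_type >= SHADERTYPE_MIN && shader_type < max_allowed;
}

int main(void)
{
	/* 0 is below the minimum and is now rejected; 1 is still accepted. */
	printf("type 0, legacy context: %d\n", shadertype_is_valid(SM_LEGACY, 0));
	printf("type 1, legacy context: %d\n", shadertype_is_valid(SM_LEGACY, 1));
	return 0;
}
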
diff --git a/queue-5.15/series b/queue-5.15/series
index d04cc8a2c9dd88117d1397984522c99ffc2d8025..96603c3fee0e5a071f761483d5291f493313d25f 100644 (file)
@@ -66,3 +66,7 @@ of-unittest-fix-expect-for-parse_phandle_with_args_map-test.patch
 of-dynamic-refactor-action-prints-to-not-use-pof-inside-devtree_lock.patch
 media-vcodec-fix-potential-array-out-of-bounds-in-encoder-queue_setup.patch
 pci-acpiphp-use-pci_assign_unassigned_bridge_resources-only-for-non-root-bus.patch
+drm-vmwgfx-fix-shader-stage-validation.patch
+drm-display-dp-fix-the-dp-dsc-receiver-cap-size.patch
+x86-fpu-invalidate-fpu-state-correctly-on-exec.patch
+x86-fpu-set-x86_feature_osxsave-feature-after-enabling-osxsave-in-cr4.patch
diff --git a/queue-5.15/x86-fpu-invalidate-fpu-state-correctly-on-exec.patch b/queue-5.15/x86-fpu-invalidate-fpu-state-correctly-on-exec.patch
new file mode 100644 (file)
index 0000000..3c95093
--- /dev/null
@@ -0,0 +1,135 @@
+From 1f69383b203e28cf8a4ca9570e572da1699f76cd Mon Sep 17 00:00:00 2001
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Date: Fri, 18 Aug 2023 10:03:05 -0700
+Subject: x86/fpu: Invalidate FPU state correctly on exec()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+
+commit 1f69383b203e28cf8a4ca9570e572da1699f76cd upstream.
+
+The thread flag TIF_NEED_FPU_LOAD indicates that the FPU saved state is
+valid and should be reloaded when returning to userspace. However, the
+kernel will skip doing this if the FPU registers are already valid as
+determined by fpregs_state_valid(). The logic embedded there considers
+the state valid if two cases are both true:
+
+  1: fpu_fpregs_owner_ctx points to the current task's FPU state
+  2: the last CPU the registers were live in was the current CPU.
+
+This is usually correct logic. A CPU’s fpu_fpregs_owner_ctx is set to
+the current FPU during the fpregs_restore_userregs() operation, so it
+indicates that the registers have been restored on this CPU. But this
+alone doesn’t preclude that the task hasn’t been rescheduled to a
+different CPU, where the registers were modified, and then back to the
+current CPU. To verify that this was not the case the logic relies on the
+second condition. So the assumption is that if the registers have been
+restored, AND they haven’t had the chance to be modified (by being
+loaded on another CPU), then they MUST be valid on the current CPU.
+
+Besides the lazy FPU optimizations, the other cases where the FPU
+registers might not be valid are when the kernel modifies the FPU register
+state or the FPU saved buffer. In this case the operation modifying the
+FPU state needs to let the kernel know the correspondence has been
+broken. The comment in “arch/x86/kernel/fpu/context.h” has:
+/*
+...
+ * If the FPU register state is valid, the kernel can skip restoring the
+ * FPU state from memory.
+ *
+ * Any code that clobbers the FPU registers or updates the in-memory
+ * FPU state for a task MUST let the rest of the kernel know that the
+ * FPU registers are no longer valid for this task.
+ *
+ * Either one of these invalidation functions is enough. Invalidate
+ * a resource you control: CPU if using the CPU for something else
+ * (with preemption disabled), FPU for the current task, or a task that
+ * is prevented from running by the current task.
+ */
+
+However, this is not completely true. When the kernel modifies the
+registers or saved FPU state, it can only rely on
+__fpu_invalidate_fpregs_state(), which wipes the FPU’s last_cpu
+tracking. The exec path instead relies on fpregs_deactivate(), which sets
+the CPU’s FPU context to NULL. This was observed to fail to restore the
+reset FPU state to the registers when returning to userspace in the
+following scenario:
+
+1. A task is executing in userspace on CPU0
+       - CPU0’s FPU context points to the task’s
+       - fpu->last_cpu=CPU0
+
+2. The task exec()’s
+
+3. While in the kernel the task is preempted
+       - CPU0 gets a thread executing in the kernel (such that no other
+               FPU context is activated)
+       - Scheduler sets task’s fpu->last_cpu=CPU0 when scheduling out
+
+4. Task is migrated to CPU1
+
+5. Continuing the exec(), the task gets to
+   fpu_flush_thread()->fpu_reset_fpregs()
+       - Sets CPU1’s fpu context to NULL
+       - Copies the init state to the task’s FPU buffer
+       - Sets TIF_NEED_FPU_LOAD on the task
+
+6. The task reschedules back to CPU0 before completing the exec() and
+   returning to userspace
+       - During the reschedule, scheduler finds TIF_NEED_FPU_LOAD is set
+       - Skips saving the registers and updating task’s fpu->last_cpu,
+         because TIF_NEED_FPU_LOAD is the canonical source.
+
+7. Now CPU0’s FPU context is still pointing to the task’s, and
+   fpu->last_cpu is still CPU0. So fpregs_state_valid() returns true even
+   though the reset FPU state has not been restored.
+
+So the root cause is that exec() is doing the wrong kind of invalidate. It
+should reset fpu->last_cpu via __fpu_invalidate_fpregs_state(). Further,
+fpu__drop() doesn't really seem appropriate as the task (and FPU) are not
+going away, they are just getting reset as part of an exec. So switch to
+__fpu_invalidate_fpregs_state().
+
+Also, delete the misleading comment that says that either kind of
+invalidate will be enough, because it’s not always the case.
+
+Fixes: 33344368cb08 ("x86/fpu: Clean up the fpu__clear() variants")
+Reported-by: Lei Wang <lei4.wang@intel.com>
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Lijun Pan <lijun.pan@intel.com>
+Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
+Acked-by: Lijun Pan <lijun.pan@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230818170305.502891-1-rick.p.edgecombe@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/internal.h |    3 +--
+ arch/x86/kernel/fpu/core.c          |    2 +-
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -416,8 +416,7 @@ DECLARE_PER_CPU(struct fpu *, fpu_fpregs
+  * FPU state for a task MUST let the rest of the kernel know that the
+  * FPU registers are no longer valid for this task.
+  *
+- * Either one of these invalidation functions is enough. Invalidate
+- * a resource you control: CPU if using the CPU for something else
++ * Invalidate a resource you control: CPU if using the CPU for something else
+  * (with preemption disabled), FPU for the current task, or a task that
+  * is prevented from running by the current task.
+  */
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -330,7 +330,7 @@ static void fpu_reset_fpstate(void)
+       struct fpu *fpu = &current->thread.fpu;
+       fpregs_lock();
+-      fpu__drop(fpu);
++      __fpu_invalidate_fpregs_state(fpu);
+       /*
+        * This does not change the actual hardware registers. It just
+        * resets the memory image and sets TIF_NEED_FPU_LOAD so a
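
A toy model of the two invalidation styles discussed in the commit message above (not kernel code; the real checks live in fpregs_state_valid() and friends): the saved state only counts as live when both the per-CPU owner pointer and the task's fpu->last_cpu agree, so clearing last_cpu invalidates the task everywhere, while clearing the owner pointer only covers the CPU the task happens to be on during exec().

/* Toy model, not kernel code: two CPUs, one task, and the two tracking
 * fields whose agreement makes the state count as valid. */
#include <stdbool.h>
#include <stdio.h>

struct fpu { int last_cpu; };

static struct fpu *owner_ctx[2];	/* stand-in for per-CPU fpu_fpregs_owner_ctx */

static bool state_valid(struct fpu *fpu, int cpu)
{
	return owner_ctx[cpu] == fpu && fpu->last_cpu == cpu;
}

int main(void)
{
	struct fpu task = { .last_cpu = 0 };

	owner_ctx[0] = &task;	/* step 1: registers were live on CPU0 */

	/* step 5: exec() resets the FPU image while the task runs on CPU1 */
	owner_ctx[1] = NULL;	/* old behaviour (fpregs_deactivate): clears the wrong CPU */
	printf("back on CPU0, CPU-side invalidate: valid=%d (stale state kept)\n",
	       state_valid(&task, 0));

	task.last_cpu = -1;	/* fix (__fpu_invalidate_fpregs_state): task-side */
	printf("back on CPU0, task-side invalidate: valid=%d (reload forced)\n",
	       state_valid(&task, 0));
	return 0;
}
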
diff --git a/queue-5.15/x86-fpu-set-x86_feature_osxsave-feature-after-enabling-osxsave-in-cr4.patch b/queue-5.15/x86-fpu-set-x86_feature_osxsave-feature-after-enabling-osxsave-in-cr4.patch
new file mode 100644 (file)
index 0000000..e8787a7
--- /dev/null
@@ -0,0 +1,61 @@
+From 2c66ca3949dc701da7f4c9407f2140ae425683a5 Mon Sep 17 00:00:00 2001
+From: Feng Tang <feng.tang@intel.com>
+Date: Wed, 23 Aug 2023 14:57:47 +0800
+Subject: x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
+
+From: Feng Tang <feng.tang@intel.com>
+
+commit 2c66ca3949dc701da7f4c9407f2140ae425683a5 upstream.
+
+0-Day found a 34.6% regression in stress-ng's 'af-alg' test case, and
+bisected it to commit b81fac906a8f ("x86/fpu: Move FPU initialization into
+arch_cpu_finalize_init()"), which optimizes the FPU init order, and moves
+the CR4_OSXSAVE enabling into a later place:
+
+   arch_cpu_finalize_init
+       identify_boot_cpu
+          identify_cpu
+              generic_identify
+                   get_cpu_cap --> setup cpu capability
+       ...
+       fpu__init_cpu
+           fpu__init_cpu_xstate
+               cr4_set_bits(X86_CR4_OSXSAVE);
+
+As the FPU is not yet initialized the CPU capability setup fails to set
+X86_FEATURE_OSXSAVE. Many security modules like 'camellia_aesni_avx_x86_64'
+depend on this feature and therefore fail to load, causing the regression.
+
+Cure this by setting the X86_FEATURE_OSXSAVE feature bit right after
+enabling OSXSAVE in CR4.
+
+[ tglx: Moved it into the actual BSP FPU initialization code and added a comment ]
+
+Fixes: b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()")
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Signed-off-by: Feng Tang <feng.tang@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/lkml/202307192135.203ac24e-oliver.sang@intel.com
+Link: https://lore.kernel.org/lkml/20230823065747.92257-1-feng.tang@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/xstate.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -809,6 +809,13 @@ void __init fpu__init_system_xstate(void
+               goto out_disable;
+       }
++      /*
++       * CPU capabilities initialization runs before FPU init. So
++       * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
++       * functional, set the feature bit so depending code works.
++       */
++      setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
++
+       print_xstate_offset_size();
+       pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
+               xfeatures_mask_all,
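
For completeness, the bit the kernel failed to latch in the regression above is the same one userspace can read back: CPUID leaf 1, ECX bit 27 (OSXSAVE) mirrors CR4.OSXSAVE. A small user-space sketch (x86 with GCC/Clang's <cpuid.h>, not kernel code):

/* User-space sketch, not kernel code: CPUID.1:ECX bit 27 (OSXSAVE) is only
 * set once the OS has enabled CR4.OSXSAVE, which is why reading CPU caps
 * before the xstate init left X86_FEATURE_OSXSAVE clear. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("OSXSAVE visible: %s\n", (ecx & (1u << 27)) ? "yes" : "no");
	return 0;
}
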