--- /dev/null
+From 5a41237ad1d4b62008f93163af1d9b1da90729d8 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 9 Jan 2023 17:04:49 -0600
+Subject: gcc: disable -Warray-bounds for gcc-11 too
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 5a41237ad1d4b62008f93163af1d9b1da90729d8 upstream.
+
+We had already disabled this warning for gcc-12 due to bugs in its value
+range analysis, but it turns out we run into similar problems with
+gcc-11.3 too, so let's disable it there as well.
+
+Older gcc versions end up being increasingly less relevant, and
+hopefully clang and newer versions of gcc (i.e. gcc-13) work reliably
+enough that we still get the build coverage even when we disable this
+for some versions.
+
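+For context, CONFIG_CC_NO_ARRAY_BOUNDS only gates the warning flag; the
+top-level Makefile consumes it roughly like this (sketch for illustration
+only, not part of this patch):
+
+    ifdef CONFIG_CC_NO_ARRAY_BOUNDS
+    KBUILD_CFLAGS += -Wno-array-bounds
+    endif
+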
+Link: https://lore.kernel.org/all/20221227002941.GA2691687@roeck-us.net/
+Link: https://lore.kernel.org/all/D8BDBF66-E44C-45D4-9758-BAAA4F0C1998@kernel.org/
+Cc: Kees Cook <kees@kernel.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/Kconfig | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -892,13 +892,17 @@ config CC_IMPLICIT_FALLTHROUGH
+ default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+
+-# Currently, disable gcc-12 array-bounds globally.
++# Currently, disable gcc-11,12 array-bounds globally.
+ # We may want to target only particular configurations some day.
++config GCC11_NO_ARRAY_BOUNDS
++ def_bool y
++
+ config GCC12_NO_ARRAY_BOUNDS
+ def_bool y
+
+ config CC_NO_ARRAY_BOUNDS
+ bool
++ default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
+ default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
+
+ #
--- /dev/null
+From 7827c81f0248e3c2f40d438b020f3d222f002171 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Fri, 6 Jan 2023 12:43:37 -0500
+Subject: Revert "SUNRPC: Use RMW bitops in single-threaded hot paths"
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 7827c81f0248e3c2f40d438b020f3d222f002171 upstream.
+
+The premise that "Once an svc thread is scheduled and executing an
+RPC, no other processes will touch svc_rqst::rq_flags" is false.
+svc_xprt_enqueue() examines the RQ_BUSY flag in scheduled nfsd
+threads when determining which thread to wake up next.
+
+Found via KCSAN.
+
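+For illustration, a minimal userspace sketch (not kernel code) of why the
+non-atomic helpers are unsafe once another thread can touch the same flags
+word: __set_bit()/__clear_bit() are plain read-modify-write sequences, so a
+concurrent update to a different bit in the word (such as RQ_BUSY) can be
+lost, whereas set_bit()/clear_bit() perform the whole RMW atomically.
+
+    #include <stdatomic.h>
+
+    static void nonatomic_set_bit(unsigned long nr, unsigned long *word)
+    {
+            *word |= 1UL << nr;     /* load, OR, store: racy against other bits */
+    }
+
+    static void atomic_set_bit(unsigned long nr, _Atomic unsigned long *word)
+    {
+            /* one indivisible RMW, analogous to the kernel's set_bit() */
+            atomic_fetch_or(word, 1UL << nr);
+    }
+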
+Fixes: 28df0988815f ("SUNRPC: Use RMW bitops in single-threaded hot paths")
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4proc.c | 7 +++----
+ fs/nfsd/nfs4xdr.c | 2 +-
+ net/sunrpc/auth_gss/svcauth_gss.c | 4 ++--
+ net/sunrpc/svc.c | 6 +++---
+ net/sunrpc/svc_xprt.c | 2 +-
+ net/sunrpc/svcsock.c | 8 ++++----
+ net/sunrpc/xprtrdma/svc_rdma_transport.c | 2 +-
+ 7 files changed, 15 insertions(+), 16 deletions(-)
+
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -928,7 +928,7 @@ nfsd4_read(struct svc_rqst *rqstp, struc
+ * the client wants us to do more in this compound:
+ */
+ if (!nfsd4_last_compound_op(rqstp))
+- __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+
+ /* check stateid */
+ status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+@@ -2615,12 +2615,11 @@ nfsd4_proc_compound(struct svc_rqst *rqs
+ cstate->minorversion = args->minorversion;
+ fh_init(current_fh, NFS4_FHSIZE);
+ fh_init(save_fh, NFS4_FHSIZE);
+-
+ /*
+ * Don't use the deferral mechanism for NFSv4; compounds make it
+ * too hard to avoid non-idempotency problems.
+ */
+- __clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++ clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+
+ /*
+ * According to RFC3010, this takes precedence over all other errors.
+@@ -2742,7 +2741,7 @@ encode_op:
+ out:
+ cstate->status = status;
+ /* Reset deferral mechanism for RPC deferrals */
+- __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ return rpc_success;
+ }
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2464,7 +2464,7 @@ nfsd4_decode_compound(struct nfsd4_compo
+ argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
+
+ if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
+- __clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
++ clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+
+ return true;
+ }
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -900,7 +900,7 @@ unwrap_integ_data(struct svc_rqst *rqstp
+ * rejecting the server-computed MIC in this somewhat rare case,
+ * do not use splice with the GSS integrity service.
+ */
+- __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+
+ /* Did we already verify the signature on the original pass through? */
+ if (rqstp->rq_deferred)
+@@ -972,7 +972,7 @@ unwrap_priv_data(struct svc_rqst *rqstp,
+ int pad, remaining_len, offset;
+ u32 rseqno;
+
+- __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+
+ priv_len = svc_getnl(&buf->head[0]);
+ if (rqstp->rq_deferred) {
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1244,10 +1244,10 @@ svc_process_common(struct svc_rqst *rqst
+ goto err_short_len;
+
+ /* Will be turned off by GSS integrity and privacy services */
+- __set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ /* Will be turned off only when NFSv4 Sessions are used */
+- __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+- __clear_bit(RQ_DROPME, &rqstp->rq_flags);
++ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++ clear_bit(RQ_DROPME, &rqstp->rq_flags);
+
+ svc_putu32(resv, rqstp->rq_xid);
+
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1238,7 +1238,7 @@ static struct cache_deferred_req *svc_de
+ trace_svc_defer(rqstp);
+ svc_xprt_get(rqstp->rq_xprt);
+ dr->xprt = rqstp->rq_xprt;
+- __set_bit(RQ_DROPME, &rqstp->rq_flags);
++ set_bit(RQ_DROPME, &rqstp->rq_flags);
+
+ dr->handle.revisit = svc_revisit;
+ return &dr->handle;
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -298,9 +298,9 @@ static void svc_sock_setbufsize(struct s
+ static void svc_sock_secure_port(struct svc_rqst *rqstp)
+ {
+ if (svc_port_is_privileged(svc_addr(rqstp)))
+- __set_bit(RQ_SECURE, &rqstp->rq_flags);
++ set_bit(RQ_SECURE, &rqstp->rq_flags);
+ else
+- __clear_bit(RQ_SECURE, &rqstp->rq_flags);
++ clear_bit(RQ_SECURE, &rqstp->rq_flags);
+ }
+
+ /*
+@@ -1008,9 +1008,9 @@ static int svc_tcp_recvfrom(struct svc_r
+ rqstp->rq_xprt_ctxt = NULL;
+ rqstp->rq_prot = IPPROTO_TCP;
+ if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+- __set_bit(RQ_LOCAL, &rqstp->rq_flags);
++ set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ else
+- __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
++ clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+
+ p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ calldir = p[1];
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -602,7 +602,7 @@ static int svc_rdma_has_wspace(struct sv
+
+ static void svc_rdma_secure_port(struct svc_rqst *rqstp)
+ {
+- __set_bit(RQ_SECURE, &rqstp->rq_flags);
++ set_bit(RQ_SECURE, &rqstp->rq_flags);
+ }
+
+ static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
--- /dev/null
+From 6ea25770b043c7997ab21d1ce95ba5de4d3d85d9 Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:32 -0800
+Subject: selftests/vm/pkeys: Add a regression test for setting PKRU through ptrace
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 6ea25770b043c7997ab21d1ce95ba5de4d3d85d9 upstream.
+
+This tests PTRACE_SETREGSET with NT_X86_XSTATE, both modifying PKRU directly
+and removing the PKRU bit from XSTATE_BV.
+
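+The test relies on the standard XSAVE layout (reference sketch below): the
+512-byte legacy FXSAVE area is followed by the XSAVE header, whose first
+u64 is XSTATE_BV, and PKRU is XSAVE component 9, so clearing bit 9 there
+marks PKRU as being in its init state for the next restore.
+
+    /* reference sketch of the header the test pokes at offset 512 */
+    struct xsave_header_sketch {
+            unsigned long long xstate_bv;   /* per-component "present" bits */
+            unsigned long long xcomp_bv;
+            unsigned long long reserved[6];
+    };
+    #define XFEATURE_PKRU_BIT 9             /* cleared via ~(1 << 9) in the test */
+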
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-7-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/vm/pkey-x86.h | 12 ++
+ tools/testing/selftests/vm/protection_keys.c | 131 ++++++++++++++++++++++++++-
+ 2 files changed, 141 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/vm/pkey-x86.h
++++ b/tools/testing/selftests/vm/pkey-x86.h
+@@ -104,6 +104,18 @@ static inline int cpu_has_pkeys(void)
+ return 1;
+ }
+
++static inline int cpu_max_xsave_size(void)
++{
++ unsigned long XSTATE_CPUID = 0xd;
++ unsigned int eax;
++ unsigned int ebx;
++ unsigned int ecx;
++ unsigned int edx;
++
++ __cpuid_count(XSTATE_CPUID, 0, eax, ebx, ecx, edx);
++ return ecx;
++}
++
+ static inline u32 pkey_bit_position(int pkey)
+ {
+ return pkey * PKEY_BITS_PER_PKEY;
+--- a/tools/testing/selftests/vm/protection_keys.c
++++ b/tools/testing/selftests/vm/protection_keys.c
+@@ -18,12 +18,13 @@
+ * do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
+ *
+ * Compile like this:
+- * gcc -o protection_keys -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+- * gcc -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
++ * gcc -mxsave -o protection_keys -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
++ * gcc -mxsave -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+ */
+ #define _GNU_SOURCE
+ #define __SANE_USERSPACE_TYPES__
+ #include <errno.h>
++#include <linux/elf.h>
+ #include <linux/futex.h>
+ #include <time.h>
+ #include <sys/time.h>
+@@ -1550,6 +1551,129 @@ void test_implicit_mprotect_exec_only_me
+ do_not_expect_pkey_fault("plain read on recently PROT_EXEC area");
+ }
+
++#if defined(__i386__) || defined(__x86_64__)
++void test_ptrace_modifies_pkru(int *ptr, u16 pkey)
++{
++ u32 new_pkru;
++ pid_t child;
++ int status, ret;
++ int pkey_offset = pkey_reg_xstate_offset();
++ size_t xsave_size = cpu_max_xsave_size();
++ void *xsave;
++ u32 *pkey_register;
++ u64 *xstate_bv;
++ struct iovec iov;
++
++ new_pkru = ~read_pkey_reg();
++ /* Don't make PROT_EXEC mappings inaccessible */
++ new_pkru &= ~3;
++
++ child = fork();
++ pkey_assert(child >= 0);
++ dprintf3("[%d] fork() ret: %d\n", getpid(), child);
++ if (!child) {
++ ptrace(PTRACE_TRACEME, 0, 0, 0);
++ /* Stop and allow the tracer to modify PKRU directly */
++ raise(SIGSTOP);
++
++ /*
++ * need __read_pkey_reg() version so we do not do shadow_pkey_reg
++ * checking
++ */
++ if (__read_pkey_reg() != new_pkru)
++ exit(1);
++
++ /* Stop and allow the tracer to clear XSTATE_BV for PKRU */
++ raise(SIGSTOP);
++
++ if (__read_pkey_reg() != 0)
++ exit(1);
++
++ /* Stop and allow the tracer to examine PKRU */
++ raise(SIGSTOP);
++
++ exit(0);
++ }
++
++ pkey_assert(child == waitpid(child, &status, 0));
++ dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++ pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++ xsave = (void *)malloc(xsave_size);
++ pkey_assert(xsave > 0);
++
++ /* Modify the PKRU register directly */
++ iov.iov_base = xsave;
++ iov.iov_len = xsave_size;
++ ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++
++ pkey_register = (u32 *)(xsave + pkey_offset);
++ pkey_assert(*pkey_register == read_pkey_reg());
++
++ *pkey_register = new_pkru;
++
++ ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++
++ /* Test that the modification is visible in ptrace before any execution */
++ memset(xsave, 0xCC, xsave_size);
++ ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++ pkey_assert(*pkey_register == new_pkru);
++
++ /* Execute the tracee */
++ ret = ptrace(PTRACE_CONT, child, 0, 0);
++ pkey_assert(ret == 0);
++
++ /* Test that the tracee saw the PKRU value change */
++ pkey_assert(child == waitpid(child, &status, 0));
++ dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++ pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++ /* Test that the modification is visible in ptrace after execution */
++ memset(xsave, 0xCC, xsave_size);
++ ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++ pkey_assert(*pkey_register == new_pkru);
++
++ /* Clear the PKRU bit from XSTATE_BV */
++ xstate_bv = (u64 *)(xsave + 512);
++ *xstate_bv &= ~(1 << 9);
++
++ ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++
++ /* Test that the modification is visible in ptrace before any execution */
++ memset(xsave, 0xCC, xsave_size);
++ ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++ pkey_assert(*pkey_register == 0);
++
++ ret = ptrace(PTRACE_CONT, child, 0, 0);
++ pkey_assert(ret == 0);
++
++ /* Test that the tracee saw the PKRU value go to 0 */
++ pkey_assert(child == waitpid(child, &status, 0));
++ dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++ pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++ /* Test that the modification is visible in ptrace after execution */
++ memset(xsave, 0xCC, xsave_size);
++ ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++ pkey_assert(ret == 0);
++ pkey_assert(*pkey_register == 0);
++
++ ret = ptrace(PTRACE_CONT, child, 0, 0);
++ pkey_assert(ret == 0);
++ pkey_assert(child == waitpid(child, &status, 0));
++ dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++ pkey_assert(WIFEXITED(status));
++ pkey_assert(WEXITSTATUS(status) == 0);
++ free(xsave);
++}
++#endif
++
+ void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
+ {
+ int size = PAGE_SIZE;
+@@ -1585,6 +1709,9 @@ void (*pkey_tests[])(int *ptr, u16 pkey)
+ test_pkey_syscalls_bad_args,
+ test_pkey_alloc_exhaust,
+ test_pkey_alloc_free_attach_pkey0,
++#if defined(__i386__) || defined(__x86_64__)
++ test_ptrace_modifies_pkru,
++#endif
+ };
+
+ void run_tests_once(void)
parisc-align-parisc-madv_xxx-constants-with-all-other-architectures.patch
+x86-fpu-take-task_struct-in-copy_sigframe_from_user_to_xstate.patch
+x86-fpu-add-a-pkru-argument-to-copy_uabi_from_kernel_to_xstate.patch
+x86-fpu-add-a-pkru-argument-to-copy_uabi_to_xstate.patch
+x86-fpu-allow-pkru-to-be-once-again-written-by-ptrace.patch
+x86-fpu-emulate-xrstor-s-behavior-if-the-xfeatures-pkru-bit-is-not-set.patch
+selftests-vm-pkeys-add-a-regression-test-for-setting-pkru-through-ptrace.patch
+revert-sunrpc-use-rmw-bitops-in-single-threaded-hot-paths.patch
+gcc-disable-warray-bounds-for-gcc-11-too.patch
--- /dev/null
+From 1c813ce0305571e1b2e4cc4acca451da9e6ad18f Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:28 -0800
+Subject: x86/fpu: Add a pkru argument to copy_uabi_from_kernel_to_xstate().
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 1c813ce0305571e1b2e4cc4acca451da9e6ad18f upstream.
+
+Both KVM (through KVM_SET_XSAVE) and ptrace (through PTRACE_SETREGSET
+with NT_X86_XSTATE) ultimately call copy_uabi_from_kernel_to_xstate(),
+but the canonical locations for the current PKRU value for KVM guests
+and processes in a ptrace stop are different (in the kvm_vcpu_arch and
+the thread_struct structs respectively).
+
+In preparation for eventually handling PKRU in copy_uabi_to_xstate(),
+pass in a pointer to the PKRU location.
+
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-3-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/core.c | 2 +-
+ arch/x86/kernel/fpu/regset.c | 2 +-
+ arch/x86/kernel/fpu/xstate.c | 2 +-
+ arch/x86/kernel/fpu/xstate.h | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -406,7 +406,7 @@ int fpu_copy_uabi_to_guest_fpstate(struc
+ if (ustate->xsave.header.xfeatures & ~xcr0)
+ return -EINVAL;
+
+- ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
++ ret = copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
+ if (ret)
+ return ret;
+
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -167,7 +167,7 @@ int xstateregs_set(struct task_struct *t
+ }
+
+ fpu_force_restore(fpu);
+- ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf);
++ ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);
+
+ out:
+ vfree(tmpbuf);
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1268,7 +1268,7 @@ static int copy_uabi_to_xstate(struct fp
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
+ * format and copy to the target thread. Used by ptrace and KVM.
+ */
+-int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf)
++int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru)
+ {
+ return copy_uabi_to_xstate(fpstate, kbuf, NULL);
+ }
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -46,7 +46,7 @@ extern void __copy_xstate_to_uabi_buf(st
+ u32 pkru_val, enum xstate_copy_mode copy_mode);
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode mode);
+-extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
++extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
+ extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);
+
+
--- /dev/null
+From 2c87767c35ee9744f666ccec869d5fe742c3de0a Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:29 -0800
+Subject: x86/fpu: Add a pkru argument to copy_uabi_to_xstate()
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 2c87767c35ee9744f666ccec869d5fe742c3de0a upstream.
+
+In preparation for moving PKRU handling code out of
+fpu_copy_uabi_to_guest_fpstate() and into copy_uabi_to_xstate(), add an
+argument that copy_uabi_from_kernel_to_xstate() can use to pass the
+canonical location of the PKRU value. For
+copy_sigframe_from_user_to_xstate() the kernel will actually restore the
+PKRU value from the fpstate, but pass in the thread_struct's pkru location
+anyway for consistency.
+
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-4-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/xstate.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1200,8 +1200,18 @@ static int copy_from_buffer(void *dst, u
+ }
+
+
++/**
++ * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate
++ * @fpstate: The fpstate buffer to copy to
++ * @kbuf: The UABI format buffer, if it comes from the kernel
++ * @ubuf: The UABI format buffer, if it comes from userspace
++ * @pkru: unused
++ *
++ * Converts from the UABI format into the kernel internal hardware
++ * dependent format.
++ */
+ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+- const void __user *ubuf)
++ const void __user *ubuf, u32 *pkru)
+ {
+ struct xregs_state *xsave = &fpstate->regs.xsave;
+ unsigned int offset, size;
+@@ -1270,7 +1280,7 @@ static int copy_uabi_to_xstate(struct fp
+ */
+ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru)
+ {
+- return copy_uabi_to_xstate(fpstate, kbuf, NULL);
++ return copy_uabi_to_xstate(fpstate, kbuf, NULL, pkru);
+ }
+
+ /*
+@@ -1281,7 +1291,7 @@ int copy_uabi_from_kernel_to_xstate(stru
+ int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
+ const void __user *ubuf)
+ {
+- return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf);
++ return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru);
+ }
+
+ static bool validate_independent_components(u64 mask)
--- /dev/null
+From 4a804c4f8356393d6b5eff7600f07615d7869c13 Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:30 -0800
+Subject: x86/fpu: Allow PKRU to be (once again) written by ptrace.
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 4a804c4f8356393d6b5eff7600f07615d7869c13 upstream.
+
+Move KVM's PKRU handling code in fpu_copy_uabi_to_guest_fpstate() to
+copy_uabi_to_xstate() so that it is shared with other APIs that write the
+XSTATE such as PTRACE_SETREGSET with NT_X86_XSTATE.
+
+This restores the pre-5.14 behavior of ptrace. The regression can be seen
+by running gdb and executing `p $pkru`, `set $pkru = 42`, and `p $pkru`.
+On affected kernels (5.14+) the write to the PKRU register (which gdb
+performs through ptrace) is ignored.
+
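+For reference, the value written through ptrace lands in the 32-bit PKRU
+register, which holds two bits per protection key (userspace sketch below,
+for illustration only, not part of this patch):
+
+    /* PKRU layout: bit 2*k = AD (access disable), bit 2*k+1 = WD (write
+     * disable) for protection key k.  E.g. "set $pkru = 42" (0b101010)
+     * sets WD for keys 0..2. */
+    static unsigned int pkru_deny_write(unsigned int pkru, int pkey)
+    {
+            return pkru | (2u << (2 * pkey));
+    }
+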
+[ dhansen: removed stable@ tag for now. The ABI was broken for long
+ enough that this is not urgent material. Let's let it stew
+ in tip for a few weeks before it's submitted to stable
+ because there are so many ABIs potentially affected. ]
+
+Fixes: e84ba47e313d ("x86/fpu: Hook up PKRU into ptrace()")
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-5-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/core.c | 13 +------------
+ arch/x86/kernel/fpu/xstate.c | 21 ++++++++++++++++++++-
+ 2 files changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -391,8 +391,6 @@ int fpu_copy_uabi_to_guest_fpstate(struc
+ {
+ struct fpstate *kstate = gfpu->fpstate;
+ const union fpregs_state *ustate = buf;
+- struct pkru_state *xpkru;
+- int ret;
+
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+ if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
+@@ -406,16 +404,7 @@ int fpu_copy_uabi_to_guest_fpstate(struc
+ if (ustate->xsave.header.xfeatures & ~xcr0)
+ return -EINVAL;
+
+- ret = copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
+- if (ret)
+- return ret;
+-
+- /* Retrieve PKRU if not in init state */
+- if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
+- xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
+- *vpkru = xpkru->pkru;
+- }
+- return 0;
++ return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
+ }
+ EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
+ #endif /* CONFIG_KVM */
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1205,10 +1205,22 @@ static int copy_from_buffer(void *dst, u
+ * @fpstate: The fpstate buffer to copy to
+ * @kbuf: The UABI format buffer, if it comes from the kernel
+ * @ubuf: The UABI format buffer, if it comes from userspace
+- * @pkru: unused
++ * @pkru: The location to write the PKRU value to
+ *
+ * Converts from the UABI format into the kernel internal hardware
+ * dependent format.
++ *
++ * This function ultimately has three different callers with distinct PKRU
++ * behavior.
++ * 1. When called from sigreturn the PKRU register will be restored from
++ * @fpstate via an XRSTOR. Correctly copying the UABI format buffer to
++ * @fpstate is sufficient to cover this case, but the caller will also
++ * pass a pointer to the thread_struct's pkru field in @pkru and updating
++ * it is harmless.
++ * 2. When called from ptrace the PKRU register will be restored from the
++ * thread_struct's pkru field. A pointer to that is passed in @pkru.
++ * 3. When called from KVM the PKRU register will be restored from the vcpu's
++ * pkru field. A pointer to that is passed in @pkru.
+ */
+ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+ const void __user *ubuf, u32 *pkru)
+@@ -1260,6 +1272,13 @@ static int copy_uabi_to_xstate(struct fp
+ }
+ }
+
++ if (hdr.xfeatures & XFEATURE_MASK_PKRU) {
++ struct pkru_state *xpkru;
++
++ xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU);
++ *pkru = xpkru->pkru;
++ }
++
+ /*
+ * The state that came in from userspace was user-state only.
+ * Mask all the user states out of 'xfeatures':
--- /dev/null
+From d7e5aceace514a2b1b3ca3dc44f93f1704766ca7 Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:31 -0800
+Subject: x86/fpu: Emulate XRSTOR's behavior if the xfeatures PKRU bit is not set
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit d7e5aceace514a2b1b3ca3dc44f93f1704766ca7 upstream.
+
+The hardware XRSTOR instruction resets the PKRU register to its hardware
+init value (namely 0) if the PKRU bit is not set in the xfeatures mask.
+Emulating that here restores the pre-5.14 behavior for PTRACE_SETREGSET
+with NT_X86_XSTATE, and makes sigreturn (which still uses XRSTOR) and
+ptrace behave identically. KVM has never used XRSTOR and never had this
+behavior, so KVM opts out of this emulation by passing a NULL pkru pointer
+to copy_uabi_to_xstate().
+
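+The emulated rule boils down to this (sketch; it mirrors the logic added in
+the xstate.c hunk below):
+
+    /* if the buffer's XSTATE_BV has the PKRU bit (component 9) clear,
+     * XRSTOR would load PKRU's init value, which is 0 */
+    static unsigned int xrstor_pkru_result(unsigned long long xstate_bv,
+                                           unsigned int buffer_pkru)
+    {
+            return (xstate_bv & (1ULL << 9)) ? buffer_pkru : 0;
+    }
+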
+Fixes: e84ba47e313d ("x86/fpu: Hook up PKRU into ptrace()")
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-6-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/core.c | 8 ++++++++
+ arch/x86/kernel/fpu/xstate.c | 15 ++++++++++++++-
+ 2 files changed, 22 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -404,6 +404,14 @@ int fpu_copy_uabi_to_guest_fpstate(struc
+ if (ustate->xsave.header.xfeatures & ~xcr0)
+ return -EINVAL;
+
++ /*
++ * Nullify @vpkru to preserve its current value if PKRU's bit isn't set
++ * in the header. KVM's odd ABI is to leave PKRU untouched in this
++ * case (all other components are eventually re-initialized).
++ */
++ if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU))
++ vpkru = NULL;
++
+ return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
+ }
+ EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1219,8 +1219,14 @@ static int copy_from_buffer(void *dst, u
+ * it is harmless.
+ * 2. When called from ptrace the PKRU register will be restored from the
+ * thread_struct's pkru field. A pointer to that is passed in @pkru.
++ * The kernel will restore it manually, so the XRSTOR behavior that resets
++ * the PKRU register to the hardware init value (0) if the corresponding
++ * xfeatures bit is not set is emulated here.
+ * 3. When called from KVM the PKRU register will be restored from the vcpu's
+- * pkru field. A pointer to that is passed in @pkru.
++ * pkru field. A pointer to that is passed in @pkru. KVM hasn't used
++ * XRSTOR and hasn't had the PKRU resetting behavior described above. To
++ * preserve that KVM behavior, it passes NULL for @pkru if the xfeatures
++ * bit is not set.
+ */
+ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+ const void __user *ubuf, u32 *pkru)
+@@ -1277,6 +1283,13 @@ static int copy_uabi_to_xstate(struct fp
+
+ xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU);
+ *pkru = xpkru->pkru;
++ } else {
++ /*
++ * KVM may pass NULL here to indicate that it does not need
++ * PKRU updated.
++ */
++ if (pkru)
++ *pkru = 0;
+ }
+
+ /*
--- /dev/null
+From 6a877d2450ace4f27c012519e5a1ae818f931983 Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 15 Nov 2022 15:09:27 -0800
+Subject: x86/fpu: Take task_struct* in copy_sigframe_from_user_to_xstate()
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 6a877d2450ace4f27c012519e5a1ae818f931983 upstream.
+
+This will allow copy_sigframe_from_user_to_xstate() to grab the address of
+thread_struct's pkru value in a later patch.
+
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20221115230932.7126-2-khuey%40kylehuey.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/signal.c | 2 +-
+ arch/x86/kernel/fpu/xstate.c | 4 ++--
+ arch/x86/kernel/fpu/xstate.h | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -396,7 +396,7 @@ static bool __fpu_restore_sig(void __use
+
+ fpregs = &fpu->fpstate->regs;
+ if (use_xsave() && !fx_only) {
+- if (copy_sigframe_from_user_to_xstate(fpu->fpstate, buf_fx))
++ if (copy_sigframe_from_user_to_xstate(tsk, buf_fx))
+ return false;
+ } else {
+ if (__copy_from_user(&fpregs->fxsave, buf_fx,
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1278,10 +1278,10 @@ int copy_uabi_from_kernel_to_xstate(stru
+ * XSAVE[S] format and copy to the target thread. This is called from the
+ * sigreturn() and rt_sigreturn() system calls.
+ */
+-int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate,
++int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
+ const void __user *ubuf)
+ {
+- return copy_uabi_to_xstate(fpstate, NULL, ubuf);
++ return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf);
+ }
+
+ static bool validate_independent_components(u64 mask)
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -47,7 +47,7 @@ extern void __copy_xstate_to_uabi_buf(st
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode mode);
+ extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
+-extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);
++extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);
+
+
+ extern void fpu__init_cpu_xstate(void);