--- /dev/null
+From 3c12466b6b7bf1e56f9b32c366a3d83d87afb4de Mon Sep 17 00:00:00 2001
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+Date: Wed, 6 Dec 2023 12:55:34 +0800
+Subject: erofs: fix lz4 inplace decompression
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+commit 3c12466b6b7bf1e56f9b32c366a3d83d87afb4de upstream.
+
+Currently, EROFS can map another compressed buffer for in-place
+decompression; this is used to handle cases where some pages of the
+compressed data are not actually in-place I/O.
+
+However, like most simple LZ77 algorithms, LZ4 expects the compressed
+data to be arranged at the end of the decompressed buffer, and it
+explicitly uses memmove() to handle the overlap:
+ __________________________________________________________
+ |_ direction of decompression --> ____ |_ compressed data _|
+
+Although EROFS arranges compressed data like this, it typically maps the
+two buffers individually in virtual memory, so their relative order is
+not guaranteed.  Previously this was rarely observed, since LZ4 only
+uses memmove() for short overlapping literals and the x86/arm64
+memmove implementations happen to cover the problem up.  Juhyung
+reported that EROFS data corruption can be observed on a new Intel x86
+processor.  After some analysis, it appears that recent x86 processors
+with the FSRM feature expose this issue through "rep movsb".
+
+For now, strictly use the decompressed buffer for LZ4 in-place
+decompression.  Later, as a useful improvement, we could try to tie
+these two buffers together in the correct order.
+
+Reported-and-tested-by: Juhyung Park <qkrwngud825@gmail.com>
+Closes: https://lore.kernel.org/r/CAD14+f2AVKf8Fa2OO1aAUdDNTDsVzzR6ctU_oJSmTyd6zSYR2Q@mail.gmail.com
+Fixes: 0ffd71bcc3a0 ("staging: erofs: introduce LZ4 decompression inplace")
+Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend")
+Cc: stable <stable@vger.kernel.org> # 5.4+
+Tested-by: Yifan Zhao <zhaoyifan@sjtu.edu.cn>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20231206045534.3920847-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/decompressor.c | 24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -24,7 +24,8 @@ struct z_erofs_decompressor {
+ */
+ int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool);
+- int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
++ int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out,
++ u8 *obase);
+ char *name;
+ };
+
+@@ -114,10 +115,13 @@ static void *generic_copy_inplace_data(s
+ return tmp;
+ }
+
+-static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
++static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out,
++ u8 *obase)
+ {
++ const uint nrpages_out = PAGE_ALIGN(rq->pageofs_out +
++ rq->outputsize) >> PAGE_SHIFT;
+ unsigned int inputmargin, inlen;
+- u8 *src;
++ u8 *src, *src2;
+ bool copied, support_0padding;
+ int ret;
+
+@@ -125,6 +129,7 @@ static int z_erofs_lz4_decompress(struct
+ return -EOPNOTSUPP;
+
+ src = kmap_atomic(*rq->in);
++ src2 = src;
+ inputmargin = 0;
+ support_0padding = false;
+
+@@ -148,16 +153,15 @@ static int z_erofs_lz4_decompress(struct
+ if (rq->inplace_io) {
+ const uint oend = (rq->pageofs_out +
+ rq->outputsize) & ~PAGE_MASK;
+- const uint nr = PAGE_ALIGN(rq->pageofs_out +
+- rq->outputsize) >> PAGE_SHIFT;
+-
+ if (rq->partial_decoding || !support_0padding ||
+- rq->out[nr - 1] != rq->in[0] ||
++ rq->out[nrpages_out - 1] != rq->in[0] ||
+ rq->inputsize - oend <
+ LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
+ src = generic_copy_inplace_data(rq, src, inputmargin);
+ inputmargin = 0;
+ copied = true;
++ } else {
++ src = obase + ((nrpages_out - 1) << PAGE_SHIFT);
+ }
+ }
+
+@@ -187,7 +191,7 @@ static int z_erofs_lz4_decompress(struct
+ if (copied)
+ erofs_put_pcpubuf(src);
+ else
+- kunmap_atomic(src);
++ kunmap_atomic(src2);
+ return ret;
+ }
+
+@@ -257,7 +261,7 @@ static int z_erofs_decompress_generic(st
+ return PTR_ERR(dst);
+
+ rq->inplace_io = false;
+- ret = alg->decompress(rq, dst);
++ ret = alg->decompress(rq, dst, NULL);
+ if (!ret)
+ copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+ rq->outputsize);
+@@ -291,7 +295,7 @@ static int z_erofs_decompress_generic(st
+ dst_maptype = 2;
+
+ dstmap_out:
+- ret = alg->decompress(rq, dst + rq->pageofs_out);
++ ret = alg->decompress(rq, dst + rq->pageofs_out, dst);
+
+ if (!dst_maptype)
+ kunmap_atomic(dst);
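For context, the buffer layout the commit message above describes can be
sketched in userspace roughly as follows. This is a minimal illustration
only, not the kernel code: it assumes liblz4's LZ4_decompress_safe() and
redefines the margin formula that fs/erofs/decompressor.c uses locally;
the helper name and buffer handling are hypothetical.

/*
 * Sketch of LZ4 in-place decompression: the compressed bytes must end
 * where the destination buffer ends, leaving a safety margin after the
 * decompressed data so the forward-moving writes never overrun the
 * not-yet-consumed input.
 */
#include <string.h>
#include <lz4.h>

/* Mirrors the formula defined locally in fs/erofs/decompressor.c. */
#define INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)

static int decompress_inplace(char *buf, size_t bufsize,
			      const char *csrc, int clen, int dlen)
{
	char *ctail;

	/* Refuse if the input does not fit or the margin would be violated. */
	if (clen < 0 || (size_t)clen > bufsize ||
	    (size_t)dlen + INPLACE_MARGIN(clen) > bufsize)
		return -1;

	ctail = buf + bufsize - clen;	/* input ends where the buffer ends */
	memmove(ctail, csrc, clen);	/* regions may overlap, hence memmove */
	return LZ4_decompress_safe(ctail, buf, clen, dlen);
}

The fix above addresses exactly this ordering requirement: when the
compressed pages are mapped in a separate virtual buffer, the source may
end up after the output in virtual memory, so the layout sketched here no
longer holds.  The patch instead points src into the same mapping as the
output (obase), which guarantees the correct relative order.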
--- /dev/null
+From 1df931d95f4dc1c11db1123e85d4e08156e46ef9 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Tue, 7 Jun 2022 17:00:53 +0200
+Subject: x86: drop bogus "cc" clobber from __try_cmpxchg_user_asm()
+
+From: Jan Beulich <jbeulich@suse.com>
+
+commit 1df931d95f4dc1c11db1123e85d4e08156e46ef9 upstream.
+
+As noted (and fixed) a couple of times in the past, "=@cc<cond>"
+outputs and clobbering of "cc" don't work well together.  The compiler
+appears to intend to reject such combinations, but in its upstream form
+doesn't yet manage to do so for "cc".  Furthermore, two similar macros
+don't clobber "cc", and clobbering "cc" is pointless in x86 asm()
+statements anyway - the compiler always assumes the status flags are
+clobbered there.
+
+Fixes: 989b5db215a2 ("x86/uaccess: Implement macros for CMPXCHG on user addresses")
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Message-Id: <485c0c0b-a3a7-0b7c-5264-7d00c01de032@suse.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/uaccess.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -471,7 +471,7 @@ do { \
+ [ptr] "+m" (*_ptr), \
+ [old] "+a" (__old) \
+ : [new] ltype (__new) \
+- : "memory", "cc"); \
++ : "memory"); \
+ if (unlikely(__err)) \
+ goto label; \
+ if (unlikely(!success)) \
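The conflict described above can be seen in a small standalone example.
This is an illustrative userspace snippet (assumed GCC or Clang on
x86-64, not the kernel macro): the "=@ccz" flag output hands the Z flag
back to the compiler as a boolean, so also listing "cc" as clobbered
would be contradictory - and on x86 the compiler treats the status flags
as clobbered by every asm() regardless.

#include <stdbool.h>
#include <stdio.h>

static bool dec_and_test(unsigned int *v)
{
	bool zero;

	/* Flag output: ZF after the decrement, no "cc" clobber needed. */
	asm volatile("decl %[cnt]"
		     : [cnt] "+m" (*v), "=@ccz" (zero)
		     :
		     : "memory");
	return zero;
}

int main(void)
{
	unsigned int refcount = 2;

	printf("%d\n", dec_and_test(&refcount));	/* 0: not zero yet */
	printf("%d\n", dec_and_test(&refcount));	/* 1: reached zero */
	return 0;
}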