git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author     Sasha Levin <sashal@kernel.org>
           Thu, 24 Oct 2024 11:16:26 +0000 (07:16 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Thu, 24 Oct 2024 11:16:26 +0000 (07:16 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/arm64-force-position-independent-veneers.patch [new file with mode: 0644]
queue-5.10/asoc-fsl_sai-enable-fifo-continue-on-error-fcont-bit.patch [new file with mode: 0644]
queue-5.10/block-bfq-fix-procress-reference-leakage-for-bfqq-in.patch [new file with mode: 0644]
queue-5.10/drm-vboxvideo-replace-fake-vla-at-end-of-vbva_mouse_.patch [new file with mode: 0644]
queue-5.10/exec-don-t-warn-for-racy-path_noexec-check.patch [new file with mode: 0644]
queue-5.10/iomap-update-ki_pos-a-little-later-in-iomap_dio_comp.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-force-position-independent-veneers.patch b/queue-5.10/arm64-force-position-independent-veneers.patch
new file mode 100644
index 0000000..89309eb
--- /dev/null
@@ -0,0 +1,113 @@
+From 0f20c34f49f8b173e24aa54aa68d84f7c540144d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Sep 2024 11:18:38 +0100
+Subject: arm64: Force position-independent veneers
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 9abe390e689f4f5c23c5f507754f8678431b4f72 ]
+
+Certain portions of code always need to be position-independent
+regardless of CONFIG_RELOCATABLE, including code which is executed in an
+idmap or which is executed before relocations are applied. In some
+kernel configurations the LLD linker generates position-dependent
+veneers for such code, and when executed these result in early boot-time
+failures.
+
+Marc Zyngier encountered a boot failure resulting from this when
+building a (particularly cursed) configuration with LLVM, as he reported
+to the list:
+
+  https://lore.kernel.org/linux-arm-kernel/86wmjwvatn.wl-maz@kernel.org/
+
+In Marc's kernel configuration, the .head.text and .rodata.text sections
+end up more than 128MiB apart, requiring a veneer to branch between the
+two:
+
+| [mark@lakrids:~/src/linux]% usekorg 14.1.0 aarch64-linux-objdump -t vmlinux | grep -w _text
+| ffff800080000000 g       .head.text     0000000000000000 _text
+| [mark@lakrids:~/src/linux]% usekorg 14.1.0 aarch64-linux-objdump -t vmlinux | grep -w primary_entry
+| ffff8000889df0e0 g       .rodata.text   000000000000006c primary_entry,
+
+... consequently, LLD inserts a position-dependent veneer for the branch
+from _stext (in .head.text) to primary_entry (in .rodata.text):
+
+| ffff800080000000 <_text>:
+| ffff800080000000:       fa405a4d        ccmp    x18, #0x0, #0xd, pl     // pl = nfrst
+| ffff800080000004:       14003fff        b       ffff800080010000 <__AArch64AbsLongThunk_primary_entry>
+...
+| ffff800080010000 <__AArch64AbsLongThunk_primary_entry>:
+| ffff800080010000:       58000050        ldr     x16, ffff800080010008 <__AArch64AbsLongThunk_primary_entry+0x8>
+| ffff800080010004:       d61f0200        br      x16
+| ffff800080010008:       889df0e0        .word   0x889df0e0
+| ffff80008001000c:       ffff8000        .word   0xffff8000
+
+... and as this is executed early in boot before the kernel is mapped in
+TTBR1 this results in a silent boot failure.
+
+Fix this by passing '--pic-veneer' to the linker, which will cause the
+linker to use position-independent veneers, e.g.
+
+| ffff800080000000 <_text>:
+| ffff800080000000:       fa405a4d        ccmp    x18, #0x0, #0xd, pl     // pl = nfrst
+| ffff800080000004:       14003fff        b       ffff800080010000 <__AArch64ADRPThunk_primary_entry>
+...
+| ffff800080010000 <__AArch64ADRPThunk_primary_entry>:
+| ffff800080010000:       f004e3f0        adrp    x16, ffff800089c8f000 <__idmap_text_start>
+| ffff800080010004:       91038210        add     x16, x16, #0xe0
+| ffff800080010008:       d61f0200        br      x16
+
+I've opted to pass '--pic-veneer' unconditionally, as:
+
+* In addition to solving the boot failure, these sequences are generally
+  nicer as they require fewer instructions and don't need to perform
+  data accesses.
+
+* While the position-independent veneer sequences have a limited +/-2GiB
+  range, this is not a new restriction. Even kernels built with
+  CONFIG_RELOCATABLE=n are limited to 2GiB in size as we have several
+  structures using 32-bit relative offsets and PPREL32 relocations, which
+  are similarly limited to +/-2GiB in range. These include extable
+  entries, jump table entries, and alt_instr entries.
+
+* GNU LD defaults to using position-independent veneers, and supports
+  the same '--pic-veneer' option, so this change is not expected to
+  adversely affect GNU LD.
+
+I've tested with GNU LD 2.30 to 2.42 inclusive and LLVM 13.0.1 to 19.1.0
+inclusive, using the kernel.org binaries from:
+
+* https://mirrors.edge.kernel.org/pub/tools/crosstool/
+* https://mirrors.edge.kernel.org/pub/tools/llvm/
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reported-by: Marc Zyngier <maz@kernel.org>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Will Deacon <will@kernel.org>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Link: https://lore.kernel.org/r/20240927101838.3061054-1-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 485b7dbd4f9e3..96dcddc358c78 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -10,7 +10,7 @@
+ #
+ # Copyright (C) 1995-2001 by Russell King
+-LDFLAGS_vmlinux       :=--no-undefined -X
++LDFLAGS_vmlinux       :=--no-undefined -X --pic-veneer
+ ifeq ($(CONFIG_RELOCATABLE), y)
+ # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+-- 
+2.43.0
+
diff --git a/queue-5.10/asoc-fsl_sai-enable-fifo-continue-on-error-fcont-bit.patch b/queue-5.10/asoc-fsl_sai-enable-fifo-continue-on-error-fcont-bit.patch
new file mode 100644
index 0000000..fb40357
--- /dev/null
@@ -0,0 +1,63 @@
+From b1eebb67a90043c28d9330264b6673012c075943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Sep 2024 14:08:28 +0800
+Subject: ASoC: fsl_sai: Enable 'FIFO continue on error' FCONT bit
+
+From: Shengjiu Wang <shengjiu.wang@nxp.com>
+
+[ Upstream commit 72455e33173c1a00c0ce93d2b0198eb45d5f4195 ]
+
+FCONT=1 means that, on a FIFO error, the SAI will continue from the
+same word that caused the FIFO error flag to be set, after the FIFO
+warning flag has been cleared.
+
+Set FCONT bit in control register to avoid the channel swap
+issue after SAI xrun.
+
+Signed-off-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Link: https://patch.msgid.link/1727676508-22830-1-git-send-email-shengjiu.wang@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_sai.c | 5 ++++-
+ sound/soc/fsl/fsl_sai.h | 1 +
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 03731d14d4757..998102711da09 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -490,6 +490,9 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+       val_cr4 |= FSL_SAI_CR4_FRSZ(slots);
++      /* Set to avoid channel swap */
++      val_cr4 |= FSL_SAI_CR4_FCONT;
++
+       /* Set to output mode to avoid tri-stated data pins */
+       if (tx)
+               val_cr4 |= FSL_SAI_CR4_CHMOD;
+@@ -515,7 +518,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+                          FSL_SAI_CR3_TRCE((1 << pins) - 1));
+       regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+                          FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
+-                         FSL_SAI_CR4_CHMOD_MASK,
++                         FSL_SAI_CR4_CHMOD_MASK | FSL_SAI_CR4_FCONT_MASK,
+                          val_cr4);
+       regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
+                          FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 691847d54b17d..eff3b7b2dd3e8 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -132,6 +132,7 @@
+ /* SAI Transmit and Receive Configuration 4 Register */
++#define FSL_SAI_CR4_FCONT_MASK        BIT(28)
+ #define FSL_SAI_CR4_FCONT     BIT(28)
+ #define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
+ #define FSL_SAI_CR4_FCOMB_SOFT  BIT(27)
+-- 
+2.43.0
+
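
The mask change above matters because regmap_update_bits() only writes register bits covered by its mask argument. Below is a minimal userspace sketch (illustrative only, not kernel code; the constants mirror the BIT(28) definitions added by the patch) of that read-modify-write behaviour, showing why FSL_SAI_CR4_FCONT_MASK has to be included in the mask for the FCONT bit in val_cr4 to reach the hardware.

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define CR4_FCONT       BIT(28)   /* mirrors FSL_SAI_CR4_FCONT */
#define CR4_FCONT_MASK  BIT(28)   /* mirrors FSL_SAI_CR4_FCONT_MASK */

/* Stand-in for the read-modify-write that regmap_update_bits() performs:
 * only bits set in @mask are ever written back. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int cr4 = 0;

	/* Mask without the FCONT bit: the requested bit is silently lost. */
	cr4 = update_bits(cr4, 0, CR4_FCONT);
	printf("FCONT not in mask: %#x\n", cr4);	/* 0 */

	/* Mask including FCONT_MASK, as in the patch: the bit sticks. */
	cr4 = update_bits(cr4, CR4_FCONT_MASK, CR4_FCONT);
	printf("FCONT in mask:     %#x\n", cr4);	/* 0x10000000 */
	return 0;
}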
diff --git a/queue-5.10/block-bfq-fix-procress-reference-leakage-for-bfqq-in.patch b/queue-5.10/block-bfq-fix-procress-reference-leakage-for-bfqq-in.patch
new file mode 100644
index 0000000..37cc87d
--- /dev/null
@@ -0,0 +1,148 @@
+From e391a5c2fddd557ee3b8a70f8138643348213c58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Oct 2024 11:43:14 +0800
+Subject: block, bfq: fix procress reference leakage for bfqq in merge chain
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit 73aeab373557fa6ee4ae0b742c6211ccd9859280 ]
+
+Original state:
+
+        Process 1       Process 2       Process 3       Process 4
+         (BIC1)          (BIC2)          (BIC3)          (BIC4)
+          Λ                |               |               |
+           \--------------\ \-------------\ \-------------\|
+                           V               V               V
+          bfqq1--------->bfqq2---------->bfqq3----------->bfqq4
+    ref    0               1               2               4
+
+After commit 0e456dba86c7 ("block, bfq: choose the last bfqq from merge
+chain in bfq_setup_cooperator()"), if P1 issues a new IO:
+
+Without the patch:
+
+        Process 1       Process 2       Process 3       Process 4
+         (BIC1)          (BIC2)          (BIC3)          (BIC4)
+          Λ                |               |               |
+           \------------------------------\ \-------------\|
+                                           V               V
+          bfqq1--------->bfqq2---------->bfqq3----------->bfqq4
+    ref    0               0               2               4
+
+bfqq3 will be used to handle IO from P1; this is not expected, as IO
+should be redirected to bfqq4.
+
+With the patch:
+
+          -------------------------------------------
+          |                                         |
+        Process 1       Process 2       Process 3   |   Process 4
+         (BIC1)          (BIC2)          (BIC3)     |    (BIC4)
+                           |               |        |      |
+                            \-------------\ \-------------\|
+                                           V               V
+          bfqq1--------->bfqq2---------->bfqq3----------->bfqq4
+    ref    0               0               2               4
+
+IO is redirected to bfqq4; however, the process reference count of bfqq3
+is still 2, while only P2 is using it.
+
+Fix the problem by calling bfq_merge_bfqqs() for each bfqq in the merge
+chain. Also change bfq_merge_bfqqs() to return new_bfqq, to simplify the
+code.
+
+Fixes: 0e456dba86c7 ("block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator()")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20240909134154.954924-3-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c | 33 ++++++++++++++++-----------------
+ 1 file changed, 16 insertions(+), 17 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 515e3c1a54759..c1600e3ac3339 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2774,10 +2774,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+       bfq_put_queue(bfqq);
+ }
+-static void
+-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+-              struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
++                                       struct bfq_io_cq *bic,
++                                       struct bfq_queue *bfqq)
+ {
++      struct bfq_queue *new_bfqq = bfqq->new_bfqq;
++
+       bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+               (unsigned long)new_bfqq->pid);
+       /* Save weight raising and idle window of the merged queues */
+@@ -2845,6 +2847,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+       new_bfqq->pid = -1;
+       bfqq->bic = NULL;
+       bfq_release_process_ref(bfqd, bfqq);
++
++      return new_bfqq;
+ }
+ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+@@ -2880,14 +2884,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+                * fulfilled, i.e., bic can be redirected to new_bfqq
+                * and bfqq can be put.
+                */
+-              bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
+-                              new_bfqq);
+-              /*
+-               * If we get here, bio will be queued into new_queue,
+-               * so use new_bfqq to decide whether bio and rq can be
+-               * merged.
+-               */
+-              bfqq = new_bfqq;
++              while (bfqq != new_bfqq)
++                      bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
+               /*
+                * Change also bqfd->bio_bfqq, as
+@@ -5444,6 +5442,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+       bool waiting, idle_timer_disabled = false;
+       if (new_bfqq) {
++              struct bfq_queue *old_bfqq = bfqq;
+               /*
+                * Release the request's reference to the old bfqq
+                * and make sure one is taken to the shared queue.
+@@ -5459,18 +5458,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+                * then complete the merge and redirect it to
+                * new_bfqq.
+                */
+-              if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+-                      bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+-                                      bfqq, new_bfqq);
++              if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) {
++                      while (bfqq != new_bfqq)
++                              bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
++              }
+-              bfq_clear_bfqq_just_created(bfqq);
++              bfq_clear_bfqq_just_created(old_bfqq);
+               /*
+                * rq is about to be enqueued into new_bfqq,
+                * release rq reference on bfqq
+                */
+-              bfq_put_queue(bfqq);
++              bfq_put_queue(old_bfqq);
+               rq->elv.priv[1] = new_bfqq;
+-              bfqq = new_bfqq;
+       }
+       bfq_update_io_thinktime(bfqd, bfqq);
+-- 
+2.43.0
+
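
As a rough illustration of the loop shape introduced above (a toy model only, not the real bfq reference accounting): each step of the new `while (bfqq != new_bfqq)` walk acts on one queue in the chain and releases a reference on it before moving on, so every intermediate queue gets released, rather than only the first one as in the single-jump version.

#include <stdio.h>

/* Toy stand-in for a queue in a merge chain; deliberately much simpler
 * than struct bfq_queue. */
struct toy_queue {
	const char *name;
	int process_ref;
	struct toy_queue *new_bfqq;	/* next queue in the merge chain */
};

static void release_process_ref(struct toy_queue *q)
{
	if (--q->process_ref == 0)
		printf("%s: last process reference dropped\n", q->name);
}

/* One merge step, mirroring the shape of the reworked bfq_merge_bfqqs():
 * it handles a single queue and returns its successor for the caller to
 * loop over. */
static struct toy_queue *merge_one_step(struct toy_queue *q)
{
	struct toy_queue *next = q->new_bfqq;

	release_process_ref(q);		/* the reference held on this queue */
	return next;
}

int main(void)
{
	struct toy_queue q4 = { "bfqq4", 2, NULL };
	struct toy_queue q3 = { "bfqq3", 1, &q4 };
	struct toy_queue q2 = { "bfqq2", 1, &q3 };
	struct toy_queue *bfqq = &q2, *new_bfqq = &q4;

	/* Walking the whole chain releases q2 and q3; a single jump to
	 * the tail would have released only q2 and leaked q3's count. */
	while (bfqq != new_bfqq)
		bfqq = merge_one_step(bfqq);

	printf("remaining refs: %s=%d %s=%d %s=%d\n",
	       q2.name, q2.process_ref, q3.name, q3.process_ref,
	       q4.name, q4.process_ref);
	return 0;
}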
diff --git a/queue-5.10/drm-vboxvideo-replace-fake-vla-at-end-of-vbva_mouse_.patch b/queue-5.10/drm-vboxvideo-replace-fake-vla-at-end-of-vbva_mouse_.patch
new file mode 100644
index 0000000..abeeff7
--- /dev/null
@@ -0,0 +1,72 @@
+From e8adab2e321032ed52b74a80fe8064af6ad804a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 12:45:23 +0200
+Subject: drm/vboxvideo: Replace fake VLA at end of vbva_mouse_pointer_shape
+ with real VLA
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit d92b90f9a54d9300a6e883258e79f36dab53bfae ]
+
+Replace the fake VLA at end of the vbva_mouse_pointer_shape shape with
+a real VLA to fix a "memcpy: detected field-spanning write error" warning:
+
+[   13.319813] memcpy: detected field-spanning write (size 16896) of single field "p->data" at drivers/gpu/drm/vboxvideo/hgsmi_base.c:154 (size 4)
+[   13.319841] WARNING: CPU: 0 PID: 1105 at drivers/gpu/drm/vboxvideo/hgsmi_base.c:154 hgsmi_update_pointer_shape+0x192/0x1c0 [vboxvideo]
+[   13.320038] Call Trace:
+[   13.320173]  hgsmi_update_pointer_shape [vboxvideo]
+[   13.320184]  vbox_cursor_atomic_update [vboxvideo]
+
+Note, as mentioned in the added comment, the original length calculation
+for the allocated and sent hgsmi buffer seems to be 4 bytes too large.
+Changing this is not the goal of this patch, so this behavior is kept.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240827104523.17442-1-hdegoede@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vboxvideo/hgsmi_base.c | 10 +++++++++-
+ drivers/gpu/drm/vboxvideo/vboxvideo.h  |  4 +---
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+index 361d3193258ea..7edc9cf6a6069 100644
+--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
++++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+@@ -135,7 +135,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+               flags |= VBOX_MOUSE_POINTER_VISIBLE;
+       }
+-      p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
++      /*
++       * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
++       * from having a 4 bytes fixed array at the end to using a proper VLA
++       * at the end. These 4 extra bytes were not subtracted from sizeof(*p)
++       * before the switch to the VLA, so this way the behavior is unchanged.
++       * Chances are these 4 extra bytes are not necessary but they are kept
++       * to avoid regressions.
++       */
++      p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
+                              VBVA_MOUSE_POINTER_SHAPE);
+       if (!p)
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
+index a5de40fe1a76a..bed285fe083c8 100644
+--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
++++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
+@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
+        * Bytes in the gap between the AND and the XOR mask are undefined.
+        * XOR mask scanlines have no gap between them and size of XOR mask is:
+        * xor_len = width * 4 * height.
+-       *
+-       * Preallocate 4 bytes for accessing actual data as p->data.
+        */
+-      u8 data[4];
++      u8 data[];
+ } __packed;
+ /* pointer is visible */
+-- 
+2.43.0
+
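
For context, a self-contained sketch of the fixed-array vs. flexible-array-member pattern the patch converts (the struct names and sizes here are illustrative, not the real vboxvideo definitions): with a trailing `u8 data[4]`, the hardened memcpy() sees a 4-byte destination field and flags larger copies as field-spanning writes, whereas a real flexible array member declares the trailing storage as intentionally variable, with the payload sized explicitly at allocation time.

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Illustrative layouts only -- not the real vbva_mouse_pointer_shape. */
struct shape_fixed {
	uint32_t flags;
	uint8_t data[4];	/* fake VLA: the compiler sees 4 bytes */
};

struct shape_flex {
	uint32_t flags;
	uint8_t data[];		/* real flexible array member */
};

int main(void)
{
	size_t pixel_len = 16896;
	uint8_t *pixels = calloc(pixel_len, 1);
	/* The trailing payload is allocated explicitly, so a memcpy() of
	 * pixel_len bytes into p->data no longer "spans" past a 4-byte
	 * field as far as the hardening checks are concerned. */
	struct shape_flex *p = malloc(sizeof(*p) + pixel_len);

	if (!p || !pixels)
		return 1;
	memcpy(p->data, pixels, pixel_len);

	free(p);
	free(pixels);
	return 0;
}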
diff --git a/queue-5.10/exec-don-t-warn-for-racy-path_noexec-check.patch b/queue-5.10/exec-don-t-warn-for-racy-path_noexec-check.patch
new file mode 100644
index 0000000..422188e
--- /dev/null
@@ -0,0 +1,100 @@
+From bd9e43cf23cac7e4ccdd5fd6a672641550cee158 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 15:45:51 -0300
+Subject: exec: don't WARN for racy path_noexec check
+
+From: Mateusz Guzik <mjguzik@gmail.com>
+
+[ Upstream commit 0d196e7589cefe207d5d41f37a0a28a1fdeeb7c6 ]
+
+Both the i_mode and noexec checks wrapped in WARN_ON are an artifact of
+the previous implementation. They used to legitimately check for the
+condition, but that check was moved up in two commits:
+633fb6ac3980 ("exec: move S_ISREG() check earlier")
+0fd338b2d2cd ("exec: move path_noexec() check earlier")
+
+Instead of being removed, said checks were turned into WARN_ONs, which
+has some debug value.
+
+However, the spurious path_noexec check is racy, resulting in
+unwarranted warnings should someone race with setting the noexec flag.
+
+Note that there is more to permission-checking whether execve is allowed,
+and none of the conditions are guaranteed to still hold after they were
+tested.
+
+Additionally this does not validate whether the code path did any perm
+checking to begin with -- it will pass if the inode happens to be
+regular.
+
+Keep the redundant path_noexec() check, even though it checks for a
+guarantee that isn't given, but drop the WARN around it.
+
+Reword the commentary and do small tidy ups while here.
+
+Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
+Link: https://lore.kernel.org/r/20240805131721.765484-1-mjguzik@gmail.com
+[brauner: keep redundant path_noexec() check]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+[cascardo: keep exit label and use it]
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exec.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 6e5324c7e9b69..7144c541818f6 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -144,13 +144,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
+               goto out;
+       /*
+-       * may_open() has already checked for this, so it should be
+-       * impossible to trip now. But we need to be extra cautious
+-       * and check again at the very end too.
++       * Check do_open_execat() for an explanation.
+        */
+       error = -EACCES;
+-      if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+-                       path_noexec(&file->f_path)))
++      if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
++          path_noexec(&file->f_path))
+               goto exit;
+       fsnotify_open(file);
+@@ -919,16 +917,16 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+       file = do_filp_open(fd, name, &open_exec_flags);
+       if (IS_ERR(file))
+-              goto out;
++              return file;
+       /*
+-       * may_open() has already checked for this, so it should be
+-       * impossible to trip now. But we need to be extra cautious
+-       * and check again at the very end too.
++       * In the past the regular type check was here. It moved to may_open() in
++       * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
++       * an invariant that all non-regular files error out before we get here.
+        */
+       err = -EACCES;
+-      if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+-                       path_noexec(&file->f_path)))
++      if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
++          path_noexec(&file->f_path))
+               goto exit;
+       err = deny_write_access(file);
+@@ -938,7 +936,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+       if (name->name[0] != '\0')
+               fsnotify_open(file);
+-out:
+       return file;
+ exit:
+-- 
+2.43.0
+
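
The behavioural difference of the hunks above is easiest to see in isolation: only the S_ISREG() invariant stays under WARN_ON_ONCE(), while the racy path_noexec() condition still fails with -EACCES but no longer warns. A hedged userspace sketch of that control-flow shape (the helpers here are stand-ins, not the kernel ones):

#include <stdio.h>

#define WARN_ON_ONCE(cond)	warn_once(!!(cond), #cond)

/* Userspace stand-in for the kernel's WARN_ON_ONCE(). */
static int warn_once(int cond, const char *what)
{
	static int warned;

	if (cond && !warned) {
		warned = 1;
		fprintf(stderr, "WARNING: %s\n", what);
	}
	return cond;
}

static int file_is_regular = 1;	/* invariant: checked earlier in may_open() */
static int mount_is_noexec = 1;	/* racy: may flip after the earlier check */

static int check_exec(void)
{
	/*
	 * Old shape: if (WARN_ON_ONCE(!file_is_regular || mount_is_noexec))
	 * -- a harmless race on the noexec flag triggered the warning.
	 *
	 * New shape: only the real invariant is wrapped in the WARN; the
	 * racy condition simply fails with -EACCES.
	 */
	if (WARN_ON_ONCE(!file_is_regular) || mount_is_noexec)
		return -13;	/* -EACCES */
	return 0;
}

int main(void)
{
	printf("check_exec() = %d (and no warning was printed)\n",
	       check_exec());
	return 0;
}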
diff --git a/queue-5.10/iomap-update-ki_pos-a-little-later-in-iomap_dio_comp.patch b/queue-5.10/iomap-update-ki_pos-a-little-later-in-iomap_dio_comp.patch
new file mode 100644
index 0000000..fd3cc42
--- /dev/null
@@ -0,0 +1,80 @@
+From 2691949de4249f59486050000559e671bc2fc3f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 18:33:46 +0200
+Subject: iomap: update ki_pos a little later in iomap_dio_complete
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 936e114a245b6e38e0dbf706a67e7611fc993da1 ]
+
+Move the ki_pos update down a bit to prepare for a better common helper
+that invalidates pages based on an iocb.
+
+Link: https://lkml.kernel.org/r/20230601145904.1385409-3-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andreas Gruenbacher <agruenba@redhat.com>
+Cc: Anna Schumaker <anna@kernel.org>
+Cc: Chao Yu <chao@kernel.org>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Ilya Dryomov <idryomov@gmail.com>
+Cc: Jaegeuk Kim <jaegeuk@kernel.org>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Miklos Szeredi <miklos@szeredi.hu>
+Cc: Miklos Szeredi <mszeredi@redhat.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
+Cc: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mahmoud Adam <mngyadam@amazon.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/iomap/direct-io.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index 933f234d5becd..8a49c0d3a7b46 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -93,7 +93,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+               if (offset + ret > dio->i_size &&
+                   !(dio->flags & IOMAP_DIO_WRITE))
+                       ret = dio->i_size - offset;
+-              iocb->ki_pos += ret;
+       }
+       /*
+@@ -119,15 +118,18 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+       }
+       inode_dio_end(file_inode(iocb->ki_filp));
+-      /*
+-       * If this is a DSYNC write, make sure we push it to stable storage now
+-       * that we've written data.
+-       */
+-      if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
+-              ret = generic_write_sync(iocb, ret);
+-      kfree(dio);
++      if (ret > 0) {
++              iocb->ki_pos += ret;
++              /*
++               * If this is a DSYNC write, make sure we push it to stable
++               * storage now that we've written data.
++               */
++              if (dio->flags & IOMAP_DIO_NEED_SYNC)
++                      ret = generic_write_sync(iocb, ret);
++      }
++      kfree(dio);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(iomap_dio_complete);
+-- 
+2.43.0
+
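
Stripped of the iomap specifics, the resulting completion order can be sketched as below (a simplified model with made-up types, not the real iomap_dio_complete()): the file position is advanced and the DSYNC flush issued only at the end, and only when the final byte count is positive.

#include <stdio.h>

/* Simplified stand-ins; not the real kiocb/iomap_dio structures. */
struct toy_iocb {
	long long ki_pos;
	int needs_sync;		/* models IOMAP_DIO_NEED_SYNC */
};

static long long toy_dio_complete(struct toy_iocb *iocb, long long ret)
{
	/* ... error handling, invalidation, inode_dio_end() happen here ... */

	if (ret > 0) {
		iocb->ki_pos += ret;	/* position advances last */
		if (iocb->needs_sync) {
			/* generic_write_sync() would run here in the kernel
			 * and may replace ret with an error code. */
		}
	}
	return ret;
}

int main(void)
{
	struct toy_iocb iocb = { .ki_pos = 4096, .needs_sync = 1 };
	long long ret = toy_dio_complete(&iocb, 512);

	printf("ret=%lld, ki_pos=%lld\n", ret, iocb.ki_pos);
	return 0;
}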
diff --git a/queue-5.10/series b/queue-5.10/series
index 36c53b4aa74ca93f060170f0b0195b67c0e13fbe..bdff3ee792bf21320a75dab46c0e0c93474b9e94 100644
@@ -22,3 +22,9 @@ kvm-s390-gaccess-refactor-gpa-and-length-calculation.patch
 kvm-s390-gaccess-refactor-access-address-range-check.patch
 kvm-s390-gaccess-cleanup-access-to-guest-pages.patch
 kvm-s390-gaccess-check-if-guest-address-is-in-memslo.patch
+block-bfq-fix-procress-reference-leakage-for-bfqq-in.patch
+exec-don-t-warn-for-racy-path_noexec-check.patch
+iomap-update-ki_pos-a-little-later-in-iomap_dio_comp.patch
+drm-vboxvideo-replace-fake-vla-at-end-of-vbva_mouse_.patch
+asoc-fsl_sai-enable-fifo-continue-on-error-fcont-bit.patch
+arm64-force-position-independent-veneers.patch