Fixes for 6.6
author Sasha Levin <sashal@kernel.org>
Sun, 15 Jun 2025 22:47:45 +0000 (18:47 -0400)
committer Sasha Levin <sashal@kernel.org>
Sun, 15 Jun 2025 22:47:45 +0000 (18:47 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.6/bio-fix-bio_first_folio-for-sparsemem-without-vmemma.patch [new file with mode: 0644]
queue-6.6/block-fix-bvec_set_folio-for-very-large-folios.patch [new file with mode: 0644]
queue-6.6/fs-filesystems-fix-potential-unsigned-integer-underf.patch [new file with mode: 0644]
queue-6.6/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch [new file with mode: 0644]
queue-6.6/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/bio-fix-bio_first_folio-for-sparsemem-without-vmemma.patch b/queue-6.6/bio-fix-bio_first_folio-for-sparsemem-without-vmemma.patch
new file mode 100644 (file)
index 0000000..3a1b6f3
--- /dev/null
+++ b/queue-6.6/bio-fix-bio_first_folio-for-sparsemem-without-vmemma.patch
@@ -0,0 +1,39 @@
+From 829af614049664fe7d5fcd7d308ff0cbd51f4cbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Jun 2025 15:41:25 +0100
+Subject: bio: Fix bio_first_folio() for SPARSEMEM without VMEMMAP
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit f826ec7966a63d48e16e0868af4e038bf9a1a3ae ]
+
+It is possible for physically contiguous folios to have discontiguous
+struct pages if SPARSEMEM is enabled and SPARSEMEM_VMEMMAP is not.
+This is correctly handled by folio_page_idx(), so remove this open-coded
+implementation.
+
+Fixes: 640d1930bef4 ("block: Add bio_for_each_folio_all()")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20250612144126.2849931-1-willy@infradead.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bio.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index b893418c3cc02..f193aef4fac08 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -294,7 +294,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+       fi->folio = page_folio(bvec->bv_page);
+       fi->offset = bvec->bv_offset +
+-                      PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
++                      PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
+       fi->_seg_count = bvec->bv_len;
+       fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+       fi->_next = folio_next(fi->folio);
+-- 
+2.39.5
+
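
For context on this fix: folio_page_idx() switches implementation with the
memory model, which is why the open-coded pointer subtraction was only
correct some of the time. A paraphrased sketch of the two definitions,
modeled on the kernel's headers rather than quoted verbatim:

    #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
    /*
     * struct pages live in per-section memmap arrays, so raw pointer
     * subtraction across a section boundary is meaningless; translate
     * through the page frame number instead.
     */
    #define folio_page_idx(folio, p)  (page_to_pfn(p) - folio_pfn(folio))
    #else
    /*
     * With SPARSEMEM_VMEMMAP (or FLATMEM) the memmap is virtually
     * contiguous, so plain pointer arithmetic is valid.
     */
    #define folio_page_idx(folio, p)  ((p) - &(folio)->page)
    #endif

On VMEMMAP configurations the patched line is therefore equivalent to the
old one; behavior only changes on SPARSEMEM-without-VMEMMAP kernels, where
the old arithmetic could compute a bogus offset for a folio spanning a
memory section boundary.
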
diff --git a/queue-6.6/block-fix-bvec_set_folio-for-very-large-folios.patch b/queue-6.6/block-fix-bvec_set_folio-for-very-large-folios.patch
new file mode 100644 (file)
index 0000000..a9f179f
--- /dev/null
+++ b/queue-6.6/block-fix-bvec_set_folio-for-very-large-folios.patch
@@ -0,0 +1,46 @@
+From fbca0606ef4bc677ef9a9c64ef70b5608ae8f7ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Jun 2025 15:42:53 +0100
+Subject: block: Fix bvec_set_folio() for very large folios
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 5e223e06ee7c6d8f630041a0645ac90e39a42cc6 ]
+
+Similarly to 26064d3e2b4d ("block: fix adding folio to bio"), if
+we attempt to add a folio that is larger than 4GB, we'll silently
+truncate the offset and len.  Widen the parameters to size_t, assert
+that the length is less than 4GB and set the first page that contains
+the interesting data rather than the first page of the folio.
+
+Fixes: 26db5ee15851 ("block: add a bvec_set_folio helper")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20250612144255.2850278-1-willy@infradead.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bvec.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/bvec.h b/include/linux/bvec.h
+index bd1e361b351c5..99ab7b2bba27c 100644
+--- a/include/linux/bvec.h
++++ b/include/linux/bvec.h
+@@ -57,9 +57,12 @@ static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+  * @offset:   offset into the folio
+  */
+ static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+-              unsigned int len, unsigned int offset)
++              size_t len, size_t offset)
+ {
+-      bvec_set_page(bv, &folio->page, len, offset);
++      unsigned long nr = offset / PAGE_SIZE;
++
++      WARN_ON_ONCE(len > UINT_MAX);
++      bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
+ }
+ /**
+-- 
+2.39.5
+
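
The bug class here is a silent narrowing conversion: with the old unsigned
int parameters, a size_t offset at or beyond 4GiB lost its high bits at the
call site. A minimal user-space sketch of the effect, with a hypothetical
helper name, PAGE_SIZE fixed at 4096 and a 64-bit size_t assumed:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the old prototype: the offset narrows to unsigned int. */
    static void old_bvec_set(unsigned int len, unsigned int offset)
    {
            printf("old: len=%u offset=%u\n", len, offset);
    }

    int main(void)
    {
            size_t offset = (size_t)4 << 30;        /* 4GiB into a huge folio */

            old_bvec_set(PAGE_SIZE, offset);        /* offset truncates to 0 */

            /* The fix keeps the offset as size_t and splits it instead: */
            printf("page index %zu, in-page offset %zu\n",
                   offset / PAGE_SIZE, offset % PAGE_SIZE);
            return 0;
    }

Splitting the offset into folio_page(folio, offset / PAGE_SIZE) plus
offset % PAGE_SIZE is what keeps bv_offset small, and the
WARN_ON_ONCE(len > UINT_MAX) covers the one value that still has to fit
in the 32-bit bv_len field.
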
diff --git a/queue-6.6/fs-filesystems-fix-potential-unsigned-integer-underf.patch b/queue-6.6/fs-filesystems-fix-potential-unsigned-integer-underf.patch
new file mode 100644 (file)
index 0000000..1eec1db
--- /dev/null
+++ b/queue-6.6/fs-filesystems-fix-potential-unsigned-integer-underf.patch
@@ -0,0 +1,55 @@
+From 709b23d22846ed13459a6a46561a966e88b2a2a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Apr 2025 19:45:27 +0800
+Subject: fs/filesystems: Fix potential unsigned integer underflow in fs_name()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+[ Upstream commit 1363c134ade81e425873b410566e957fecebb261 ]
+
+fs_name() takes @index as an unsigned int, so the '@index--' operation
+risks underflow (wrapping to UINT_MAX).
+
+Fix this by breaking out of the loop when '@index == 0', which is also
+more appropriate than '@index <= 0' for an unsigned comparison.
+
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/20250410-fix_fs-v1-1-7c14ccc8ebaa@quicinc.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/filesystems.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 58b9067b2391c..95e5256821a53 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
+ static int fs_name(unsigned int index, char __user * buf)
+ {
+       struct file_system_type * tmp;
+-      int len, res;
++      int len, res = -EINVAL;
+       read_lock(&file_systems_lock);
+-      for (tmp = file_systems; tmp; tmp = tmp->next, index--)
+-              if (index <= 0 && try_module_get(tmp->owner))
++      for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
++              if (index == 0) {
++                      if (try_module_get(tmp->owner))
++                              res = 0;
+                       break;
++              }
++      }
+       read_unlock(&file_systems_lock);
+-      if (!tmp)
+-              return -EINVAL;
++      if (res)
++              return res;
+       /* OK, we got the reference, so we can safely block */
+       len = strlen(tmp->name) + 1;
+-- 
+2.39.5
+
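
The hazard being fixed is plain unsigned wraparound: for an unsigned type,
'index <= 0' can only mean 'index == 0', and if the loop runs past the
matching entry (for example because try_module_get() failed there),
'@index--' wraps to UINT_MAX rather than going negative. A short
standard-C illustration, independent of the kernel code:

    #include <stdio.h>

    int main(void)
    {
            unsigned int index = 0;

            /* For unsigned types, 'index <= 0' is just 'index == 0'. */
            printf("index <= 0: %d\n", index <= 0);         /* prints 1 */

            /* Decrementing past zero wraps instead of going negative. */
            index--;
            printf("after index--: %u\n", index);           /* 4294967295 */
            return 0;
    }
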
diff --git a/queue-6.6/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch b/queue-6.6/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
new file mode 100644 (file)
index 0000000..95cbb1d
--- /dev/null
+++ b/queue-6.6/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
@@ -0,0 +1,95 @@
+From 2a81f03d4d5350b0e21c16f58e5d0f6747e562f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 14:23:03 +0200
+Subject: nvmet-fcloop: access fcpreq only when holding reqlock
+
+From: Daniel Wagner <wagi@kernel.org>
+
+[ Upstream commit 47a827cd7929d0550c3496d70b417fcb5649b27b ]
+
+The abort handling logic expects the state and the fcpreq to be
+accessed only while reqlock is held.
+
+While at it, handle aborts only in the abort handler.
+
+Signed-off-by: Daniel Wagner <wagi@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fcloop.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 4b35bdcac185f..aeeb7455fc2e7 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -613,12 +613,13 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ {
+       struct fcloop_fcpreq *tfcp_req =
+               container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+-      struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++      struct nvmefc_fcp_req *fcpreq;
+       unsigned long flags;
+       int ret = 0;
+       bool aborted = false;
+       spin_lock_irqsave(&tfcp_req->reqlock, flags);
++      fcpreq = tfcp_req->fcpreq;
+       switch (tfcp_req->inistate) {
+       case INI_IO_START:
+               tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -633,16 +634,19 @@ fcloop_fcp_recv_work(struct work_struct *work)
+       }
+       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+-      if (unlikely(aborted))
+-              ret = -ECANCELED;
+-      else {
+-              if (likely(!check_for_drop(tfcp_req)))
+-                      ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+-                              &tfcp_req->tgt_fcp_req,
+-                              fcpreq->cmdaddr, fcpreq->cmdlen);
+-              else
+-                      pr_info("%s: dropped command ********\n", __func__);
++      if (unlikely(aborted)) {
++              /* the abort handler will call fcloop_call_host_done */
++              return;
++      }
++
++      if (unlikely(check_for_drop(tfcp_req))) {
++              pr_info("%s: dropped command ********\n", __func__);
++              return;
+       }
++
++      ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
++                                 &tfcp_req->tgt_fcp_req,
++                                 fcpreq->cmdaddr, fcpreq->cmdlen);
+       if (ret)
+               fcloop_call_host_done(fcpreq, tfcp_req, ret);
+ }
+@@ -657,9 +661,10 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+       unsigned long flags;
+       spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-      fcpreq = tfcp_req->fcpreq;
+       switch (tfcp_req->inistate) {
+       case INI_IO_ABORTED:
++              fcpreq = tfcp_req->fcpreq;
++              tfcp_req->fcpreq = NULL;
+               break;
+       case INI_IO_COMPLETED:
+               completed = true;
+@@ -681,10 +686,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+               nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+                                       &tfcp_req->tgt_fcp_req);
+-      spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-      tfcp_req->fcpreq = NULL;
+-      spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+-
+       fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+       /* call_host_done releases reference for abort downcall */
+ }
+-- 
+2.39.5
+
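
The shape of this fix is a standard ownership-transfer idiom: read the
shared fcpreq pointer, and clear it, only while reqlock is held, then use
the private snapshot after dropping the lock. A minimal user-space sketch
of the idiom, with illustrative type names and a pthread mutex standing in
for the driver's spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct req { int state; };

    struct ctx {
            pthread_mutex_t lock;   /* plays the role of reqlock */
            struct req *fcpreq;     /* shared; other paths may clear it */
    };

    static struct req *claim_request(struct ctx *c)
    {
            struct req *snapshot;

            pthread_mutex_lock(&c->lock);
            snapshot = c->fcpreq;   /* read the shared pointer under the lock */
            c->fcpreq = NULL;       /* transfer ownership while still locked */
            pthread_mutex_unlock(&c->lock);

            return snapshot;        /* private copy, safe to use unlocked */
    }

Once the abort handler claims the request this way, no other path can
observe a half-torn-down fcpreq, which is why the patch can drop the later
lock/clear/unlock sequence at the end of fcloop_fcp_abort_recv_work().
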
diff --git a/queue-6.6/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch b/queue-6.6/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
new file mode 100644 (file)
index 0000000..324c74b
--- /dev/null
+++ b/queue-6.6/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
@@ -0,0 +1,98 @@
+From 93b59070a74b6eea5c343885be7096a0752f3c7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jan 2025 10:54:50 +0100
+Subject: perf: Ensure bpf_perf_link path is properly serialized
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 7ed9138a72829d2035ecbd8dbd35b1bc3c137c40 ]
+
+Ravi reported that the bpf_perf_link_attach() usage of
+perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
+PERF_EVENT_IOC_SET_BPF case.
+
+Reported-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 34 ++++++++++++++++++++++++++++++----
+ 1 file changed, 30 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6460f79280ed2..563f39518f7fe 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5913,6 +5913,9 @@ static int perf_event_set_output(struct perf_event *event,
+ static int perf_event_set_filter(struct perf_event *event, void __user *arg);
+ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+                         struct perf_event_attr *attr);
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie);
+ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
+ {
+@@ -5981,7 +5984,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
+               if (IS_ERR(prog))
+                       return PTR_ERR(prog);
+-              err = perf_event_set_bpf_prog(event, prog, 0);
++              err = __perf_event_set_bpf_prog(event, prog, 0);
+               if (err) {
+                       bpf_prog_put(prog);
+                       return err;
+@@ -10583,8 +10586,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
+       return false;
+ }
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+-                          u64 bpf_cookie)
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie)
+ {
+       bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
+@@ -10622,6 +10626,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+       return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
+ }
++int perf_event_set_bpf_prog(struct perf_event *event,
++                          struct bpf_prog *prog,
++                          u64 bpf_cookie)
++{
++      struct perf_event_context *ctx;
++      int ret;
++
++      ctx = perf_event_ctx_lock(event);
++      ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
++      perf_event_ctx_unlock(event, ctx);
++
++      return ret;
++}
++
+ void perf_event_free_bpf_prog(struct perf_event *event)
+ {
+       if (!perf_event_is_tracing(event)) {
+@@ -10641,7 +10659,15 @@ static void perf_event_free_filter(struct perf_event *event)
+ {
+ }
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie)
++{
++      return -ENOENT;
++}
++
++int perf_event_set_bpf_prog(struct perf_event *event,
++                          struct bpf_prog *prog,
+                           u64 bpf_cookie)
+ {
+       return -ENOENT;
+-- 
+2.39.5
+
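
Structurally this is the common '__locked helper plus locking wrapper'
pattern: the existing logic moves into __perf_event_set_bpf_prog(), the
ioctl path (already under the context lock via perf_event_ctx_lock()) calls
the helper directly, and the exported perf_event_set_bpf_prog() becomes a
thin wrapper that takes the lock itself. A generic sketch of the pattern,
with illustrative names and a pthread mutex in place of the perf context
lock:

    #include <pthread.h>

    struct event {
            pthread_mutex_t ctx_mutex;      /* plays the role of ctx->mutex */
            int prog_id;
    };

    /* Lock-free core: callers must already hold ctx_mutex. */
    static int __event_set_prog(struct event *e, int prog_id)
    {
            e->prog_id = prog_id;
            return 0;
    }

    /* Exported entry point: takes the lock itself, so callers that do
     * not hold the context lock (the bpf_perf_link-style path) are
     * serialized against the ioctl path. */
    int event_set_prog(struct event *e, int prog_id)
    {
            int ret;

            pthread_mutex_lock(&e->ctx_mutex);
            ret = __event_set_prog(e, prog_id);
            pthread_mutex_unlock(&e->ctx_mutex);
            return ret;
    }
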
diff --git a/queue-6.6/series b/queue-6.6/series
index 005e884134b977eb22886147e5fd7d39f6789aa8..c7054f0fdbf0dbdfe38722f3c69ad85d19135bb5 100644 (file)
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -330,3 +330,8 @@ net_sched-prio-fix-a-race-in-prio_tune.patch
 net_sched-red-fix-a-race-in-__red_change.patch
 net_sched-tbf-fix-a-race-in-tbf_change.patch
 net_sched-ets-fix-a-race-in-ets_qdisc_change.patch
+fs-filesystems-fix-potential-unsigned-integer-underf.patch
+nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
+perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
+bio-fix-bio_first_folio-for-sparsemem-without-vmemma.patch
+block-fix-bvec_set_folio-for-very-large-folios.patch