git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
author    Sasha Levin <sashal@kernel.org>
          Sun, 15 Jun 2025 22:47:45 +0000 (18:47 -0400)
committer Sasha Levin <sashal@kernel.org>
          Sun, 15 Jun 2025 22:47:45 +0000 (18:47 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch [new file with mode: 0644]
queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch [new file with mode: 0644]
queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch b/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch
new file mode 100644
index 0000000..118d215
--- /dev/null
+++ b/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch
@@ -0,0 +1,55 @@
+From 1f5c681a0d233837a25ea85160ef647e672bbba2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Apr 2025 19:45:27 +0800
+Subject: fs/filesystems: Fix potential unsigned integer underflow in fs_name()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+[ Upstream commit 1363c134ade81e425873b410566e957fecebb261 ]
+
+fs_name() has @index as unsigned int, so the operation '@index--'
+risks an underflow: once @index reaches 0, decrementing it wraps
+around to UINT_MAX instead of going negative.
+
+Fix this by breaking out of the for loop when '@index == 0', which is
+also more appropriate than '@index <= 0' for an unsigned integer
+comparison.
+
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/20250410-fix_fs-v1-1-7c14ccc8ebaa@quicinc.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/filesystems.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 58b9067b2391c..95e5256821a53 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
+ static int fs_name(unsigned int index, char __user * buf)
+ {
+       struct file_system_type * tmp;
+-      int len, res;
++      int len, res = -EINVAL;
+
+       read_lock(&file_systems_lock);
+-      for (tmp = file_systems; tmp; tmp = tmp->next, index--)
+-              if (index <= 0 && try_module_get(tmp->owner))
++      for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
++              if (index == 0) {
++                      if (try_module_get(tmp->owner))
++                              res = 0;
+                       break;
++              }
++      }
+       read_unlock(&file_systems_lock);
+-      if (!tmp)
+-              return -EINVAL;
++      if (res)
++              return res;
+
+       /* OK, we got the reference, so we can safely block */
+       len = strlen(tmp->name) + 1;
+-- 
+2.39.5
+
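
The unsigned wrap-around that the fs_name() patch above guards against is
easy to demonstrate in userspace. A minimal C sketch (illustrative only,
not kernel code): for an unsigned int, '<= 0' can only ever mean '== 0',
and decrementing past zero wraps to UINT_MAX instead of going negative.

#include <stdio.h>

int main(void)
{
	unsigned int index = 0;

	/* For an unsigned type, "index <= 0" is just "index == 0". */
	printf("index <= 0 at index == 0: %d\n", index <= 0);      /* 1 */

	/* Decrementing past zero wraps instead of going negative. */
	index--;
	printf("after index--: %u\n", index);   /* 4294967295 for a 32-bit int */
	printf("index <= 0 after wrap: %d\n", index <= 0);         /* 0 */

	return 0;
}

This is why the fixed loop stops decrementing and breaks out as soon as
@index reaches zero, reporting success only if the module reference was
actually taken.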
diff --git a/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch b/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
new file mode 100644
index 0000000..d6433ff
--- /dev/null
+++ b/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
@@ -0,0 +1,95 @@
+From 17e55452531f2ca00d46a8f201622e7ec8d2df70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 14:23:03 +0200
+Subject: nvmet-fcloop: access fcpreq only when holding reqlock
+
+From: Daniel Wagner <wagi@kernel.org>
+
+[ Upstream commit 47a827cd7929d0550c3496d70b417fcb5649b27b ]
+
+The abort handling logic expects that the state and the fcpreq pointer
+are only accessed while holding reqlock. Thus, read fcpreq inside the
+locked section instead of before taking the lock.
+
+While at it, handle the aborts only in the abort handler.
+
+Signed-off-by: Daniel Wagner <wagi@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fcloop.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 787dfb3859a0d..74fffcab88155 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -613,12 +613,13 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ {
+       struct fcloop_fcpreq *tfcp_req =
+               container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+-      struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++      struct nvmefc_fcp_req *fcpreq;
+       unsigned long flags;
+       int ret = 0;
+       bool aborted = false;
+
+       spin_lock_irqsave(&tfcp_req->reqlock, flags);
++      fcpreq = tfcp_req->fcpreq;
+       switch (tfcp_req->inistate) {
+       case INI_IO_START:
+               tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -633,16 +634,19 @@ fcloop_fcp_recv_work(struct work_struct *work)
+       }
+       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+-      if (unlikely(aborted))
+-              ret = -ECANCELED;
+-      else {
+-              if (likely(!check_for_drop(tfcp_req)))
+-                      ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+-                              &tfcp_req->tgt_fcp_req,
+-                              fcpreq->cmdaddr, fcpreq->cmdlen);
+-              else
+-                      pr_info("%s: dropped command ********\n", __func__);
++      if (unlikely(aborted)) {
++              /* the abort handler will call fcloop_call_host_done */
++              return;
++      }
++
++      if (unlikely(check_for_drop(tfcp_req))) {
++              pr_info("%s: dropped command ********\n", __func__);
++              return;
+       }
++
++      ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
++                                 &tfcp_req->tgt_fcp_req,
++                                 fcpreq->cmdaddr, fcpreq->cmdlen);
+
+       if (ret)
+               fcloop_call_host_done(fcpreq, tfcp_req, ret);
+@@ -659,9 +663,10 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+       unsigned long flags;
+
+       spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-      fcpreq = tfcp_req->fcpreq;
+       switch (tfcp_req->inistate) {
+       case INI_IO_ABORTED:
++              fcpreq = tfcp_req->fcpreq;
++              tfcp_req->fcpreq = NULL;
+               break;
+       case INI_IO_COMPLETED:
+               completed = true;
+@@ -683,10 +688,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+               nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+                                       &tfcp_req->tgt_fcp_req);
+
+-      spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-      tfcp_req->fcpreq = NULL;
+-      spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+-
+       fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+       /* call_host_done releases reference for abort downcall */
+ }
+-- 
+2.39.5
+
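
The fcloop patch above moves every read of tfcp_req->fcpreq under reqlock,
so the pointer and tfcp_req->inistate are always observed as a consistent
pair. A userspace pthread analogue of that rule (hypothetical names and
types, not the fcloop code itself):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct fcloop_fcpreq: the flag and the
 * payload pointer are only meaningful together, so both must be
 * sampled under the same lock. */
struct request {
	pthread_mutex_t lock;
	int aborted;            /* set by a concurrent abort path */
	const char *payload;    /* cleared by the abort path */
};

/* Racy shape (what the patch removes): payload is read before the
 * lock is taken, so it can be stale by the time the flag is checked. */
static const char *sample_racy(struct request *req)
{
	const char *p = req->payload;   /* races with the abort path */
	int aborted;

	pthread_mutex_lock(&req->lock);
	aborted = req->aborted;
	pthread_mutex_unlock(&req->lock);
	return aborted ? NULL : p;      /* p may no longer match aborted */
}

/* Fixed shape (what the patch enforces): snapshot both fields inside
 * one critical section, then act on the consistent snapshot. */
static const char *sample_fixed(struct request *req)
{
	const char *p;
	int aborted;

	pthread_mutex_lock(&req->lock);
	p = req->payload;
	aborted = req->aborted;
	pthread_mutex_unlock(&req->lock);
	return aborted ? NULL : p;
}

int main(void)
{
	struct request req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.aborted = 0,
		.payload = "command",
	};

	printf("%s / %s\n", sample_racy(&req), sample_fixed(&req));
	return 0;
}

The second half of the patch applies the same rule to the abort handler,
which now reads and clears the fcpreq pointer within the same locked
switch, instead of clearing it in a second lock/unlock round trip later.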
diff --git a/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch b/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
new file mode 100644
index 0000000..e432be2
--- /dev/null
+++ b/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
@@ -0,0 +1,98 @@
+From 6281a41b6b6d0c725e1f401ef3ea35187e39938e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jan 2025 10:54:50 +0100
+Subject: perf: Ensure bpf_perf_link path is properly serialized
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 7ed9138a72829d2035ecbd8dbd35b1bc3c137c40 ]
+
+Ravi reported that the bpf_perf_link_attach() usage of
+perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
+PERF_EVENT_IOC_SET_BPF case.
+
+Reported-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 34 ++++++++++++++++++++++++++++++----
+ 1 file changed, 30 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cb0c8aa71c98b..dd9ae1cae8e3c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5822,6 +5822,9 @@ static int perf_event_set_output(struct perf_event *event,
+ static int perf_event_set_filter(struct perf_event *event, void __user *arg);
+ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+                         struct perf_event_attr *attr);
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie);
+
+ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
+ {
+@@ -5890,7 +5893,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
+               if (IS_ERR(prog))
+                       return PTR_ERR(prog);
+
+-              err = perf_event_set_bpf_prog(event, prog, 0);
++              err = __perf_event_set_bpf_prog(event, prog, 0);
+               if (err) {
+                       bpf_prog_put(prog);
+                       return err;
+@@ -10360,8 +10363,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
+       return false;
+ }
+
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+-                          u64 bpf_cookie)
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie)
+ {
+       bool is_kprobe, is_tracepoint, is_syscall_tp;
+
+@@ -10395,6 +10399,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+       return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
+ }
+
++int perf_event_set_bpf_prog(struct perf_event *event,
++                          struct bpf_prog *prog,
++                          u64 bpf_cookie)
++{
++      struct perf_event_context *ctx;
++      int ret;
++
++      ctx = perf_event_ctx_lock(event);
++      ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
++      perf_event_ctx_unlock(event, ctx);
++
++      return ret;
++}
++
+ void perf_event_free_bpf_prog(struct perf_event *event)
+ {
+       if (!perf_event_is_tracing(event)) {
+@@ -10414,7 +10432,15 @@ static void perf_event_free_filter(struct perf_event *event)
+ {
+ }
+
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++                                   struct bpf_prog *prog,
++                                   u64 bpf_cookie)
++{
++      return -ENOENT;
++}
++
++int perf_event_set_bpf_prog(struct perf_event *event,
++                          struct bpf_prog *prog,
+                           u64 bpf_cookie)
+ {
+       return -ENOENT;
+-- 
+2.39.5
+
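
The perf patch above is the common lock-wrapper split: the logic moves
into a static double-underscore helper that assumes the lock is held,
and the exported entry point becomes a thin wrapper that takes the
context lock itself, so the bpf_perf_link path is serialized exactly
like the PERF_EVENT_IOC_SET_BPF ioctl path. A compact sketch of the
pattern (hypothetical names, not the perf code itself):

#include <pthread.h>

/* Hypothetical stand-in for perf_event_context. */
struct ctx {
	pthread_mutex_t mutex;
	int prog;               /* the attached "program" */
};

/* Unlocked worker (the __perf_event_set_bpf_prog analogue):
 * every caller must already hold ctx->mutex. */
static int __set_prog(struct ctx *c, int prog)
{
	c->prog = prog;         /* serialized by the caller's lock */
	return 0;
}

/* A path that already runs under the lock (the _perf_ioctl analogue)
 * calls the double-underscore worker directly. */
int set_prog_from_ioctl(struct ctx *c, int prog)
{
	int err;

	pthread_mutex_lock(&c->mutex);
	/* ... other ioctl work that needs the same lock ... */
	err = __set_prog(c, prog);
	pthread_mutex_unlock(&c->mutex);
	return err;
}

/* The exported entry point (the bpf_perf_link_attach analogue) is now
 * a thin wrapper taking the lock itself, so both call paths are
 * serialized on the same mutex. */
int set_prog(struct ctx *c, int prog)
{
	int err;

	pthread_mutex_lock(&c->mutex);
	err = __set_prog(c, prog);
	pthread_mutex_unlock(&c->mutex);
	return err;
}

int main(void)
{
	struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER, .prog = -1 };

	return set_prog(&c, 42);
}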
diff --git a/queue-5.15/series b/queue-5.15/series
index 6e0409b247831bc9b0675db6de892e715a1e49bb..a2d4a311df449068318a92ab64058bf22401cdcb 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -162,3 +162,6 @@ net_sched-red-fix-a-race-in-__red_change.patch
 net_sched-tbf-fix-a-race-in-tbf_change.patch
 sch_ets-make-est_qlen_notify-idempotent.patch
 net_sched-ets-fix-a-race-in-ets_qdisc_change.patch
+fs-filesystems-fix-potential-unsigned-integer-underf.patch
+nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
+perf-ensure-bpf_perf_link-path-is-properly-serialize.patch