From: Sasha Levin
Date: Sun, 15 Jun 2025 22:47:45 +0000 (-0400)
Subject: Fixes for 5.15
X-Git-Tag: v6.6.94~54
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=844255d344487b83ba04a3ee6df5b63c5a254442;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 5.15

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch b/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch
new file mode 100644
index 0000000000..118d215634
--- /dev/null
+++ b/queue-5.15/fs-filesystems-fix-potential-unsigned-integer-underf.patch
@@ -0,0 +1,55 @@
+From 1f5c681a0d233837a25ea85160ef647e672bbba2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 10 Apr 2025 19:45:27 +0800
+Subject: fs/filesystems: Fix potential unsigned integer underflow in fs_name()
+
+From: Zijun Hu
+
+[ Upstream commit 1363c134ade81e425873b410566e957fecebb261 ]
+
+fs_name() has @index as unsigned int, so there is an underflow risk in
+the operation '@index--'.
+
+Fix this by breaking out of the loop when '@index == 0', which is also
+the proper comparison for an unsigned integer, unlike '@index <= 0'.
+
+Signed-off-by: Zijun Hu
+Link: https://lore.kernel.org/20250410-fix_fs-v1-1-7c14ccc8ebaa@quicinc.com
+Signed-off-by: Christian Brauner
+Signed-off-by: Sasha Levin
+---
+ fs/filesystems.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 58b9067b2391c..95e5256821a53 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
+ static int fs_name(unsigned int index, char __user * buf)
+ {
+ 	struct file_system_type * tmp;
+-	int len, res;
++	int len, res = -EINVAL;
+ 
+ 	read_lock(&file_systems_lock);
+-	for (tmp = file_systems; tmp; tmp = tmp->next, index--)
+-		if (index <= 0 && try_module_get(tmp->owner))
++	for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
++		if (index == 0) {
++			if (try_module_get(tmp->owner))
++				res = 0;
+ 			break;
++		}
++	}
+ 	read_unlock(&file_systems_lock);
+-	if (!tmp)
+-		return -EINVAL;
++	if (res)
++		return res;
+ 
+ 	/* OK, we got the reference, so we can safely block */
+ 	len = strlen(tmp->name) + 1;
+-- 
+2.39.5
+
diff --git a/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch b/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
new file mode 100644
index 0000000000..d6433ffcce
--- /dev/null
+++ b/queue-5.15/nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch
@@ -0,0 +1,95 @@
+From 17e55452531f2ca00d46a8f201622e7ec8d2df70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 7 May 2025 14:23:03 +0200
+Subject: nvmet-fcloop: access fcpreq only when holding reqlock
+
+From: Daniel Wagner
+
+[ Upstream commit 47a827cd7929d0550c3496d70b417fcb5649b27b ]
+
+The abort handling logic expects that the state and the fcpreq are only
+accessed when holding the reqlock lock.
+
+While at it, only handle the aborts in the abort handler.
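+
+For illustration, a minimal user-space sketch of the locking rule this
+patch enforces (hypothetical names, a pthread mutex standing in for
+reqlock; the actual fix is in the diff below): the shared pointer is
+read in exactly one place, under the lock, and callers then work on
+that snapshot rather than re-reading the field.
+
+	#include <pthread.h>
+
+	struct req {
+		pthread_mutex_t lock;
+		void *fcpreq;	/* shared; the abort path may clear it */
+	};
+
+	/* The only place the field is read; callers keep the snapshot. */
+	static void *snapshot_fcpreq(struct req *r)
+	{
+		void *p;
+
+		pthread_mutex_lock(&r->lock);
+		p = r->fcpreq;
+		pthread_mutex_unlock(&r->lock);
+		return p;
+	}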
+
+Signed-off-by: Daniel Wagner
+Signed-off-by: Christoph Hellwig
+Signed-off-by: Sasha Levin
+---
+ drivers/nvme/target/fcloop.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 787dfb3859a0d..74fffcab88155 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -613,12 +613,13 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ {
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+-	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++	struct nvmefc_fcp_req *fcpreq;
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	bool aborted = false;
+ 
+ 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
++	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 		tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -633,16 +634,19 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 	}
+ 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+-	if (unlikely(aborted))
+-		ret = -ECANCELED;
+-	else {
+-		if (likely(!check_for_drop(tfcp_req)))
+-			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+-				&tfcp_req->tgt_fcp_req,
+-				fcpreq->cmdaddr, fcpreq->cmdlen);
+-		else
+-			pr_info("%s: dropped command ********\n", __func__);
++	if (unlikely(aborted)) {
++		/* the abort handler will call fcloop_call_host_done */
++		return;
++	}
++
++	if (unlikely(check_for_drop(tfcp_req))) {
++		pr_info("%s: dropped command ********\n", __func__);
++		return;
+ 	}
++
++	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
++			&tfcp_req->tgt_fcp_req,
++			fcpreq->cmdaddr, fcpreq->cmdlen);
+ 	if (ret)
+ 		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+ 
+@@ -659,9 +663,10 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_ABORTED:
++		fcpreq = tfcp_req->fcpreq;
++		tfcp_req->fcpreq = NULL;
+ 		break;
+ 	case INI_IO_COMPLETED:
+ 		completed = true;
+@@ -683,10 +688,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 	nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ 				&tfcp_req->tgt_fcp_req);
+ 
+-	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-	tfcp_req->fcpreq = NULL;
+-	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+-
+ 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ 	/* call_host_done releases reference for abort downcall */
+ }
+-- 
+2.39.5
+
diff --git a/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch b/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
new file mode 100644
index 0000000000..e432be24ca
--- /dev/null
+++ b/queue-5.15/perf-ensure-bpf_perf_link-path-is-properly-serialize.patch
@@ -0,0 +1,98 @@
+From 6281a41b6b6d0c725e1f401ef3ea35187e39938e Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 17 Jan 2025 10:54:50 +0100
+Subject: perf: Ensure bpf_perf_link path is properly serialized
+
+From: Peter Zijlstra
+
+[ Upstream commit 7ed9138a72829d2035ecbd8dbd35b1bc3c137c40 ]
+
+Ravi reported that the bpf_perf_link_attach() usage of
+perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
+PERF_EVENT_IOC_SET_BPF case.
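+
+The shape of the fix, as a compilable user-space sketch (hypothetical
+names, a pthread mutex standing in for ctx->mutex; the real change is
+in the diff below): the exported entry point takes the lock, the
+double-underscore helper assumes it is held, and so the ioctl path and
+the bpf link path serialize identically.
+
+	#include <pthread.h>
+
+	static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+	static int __set_prog(int prog)	/* caller holds ctx_mutex */
+	{
+		(void)prog;		/* validate and attach here */
+		return 0;
+	}
+
+	int set_prog(int prog)		/* public, serialized entry */
+	{
+		int ret;
+
+		pthread_mutex_lock(&ctx_mutex);
+		ret = __set_prog(prog);
+		pthread_mutex_unlock(&ctx_mutex);
+		return ret;
+	}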
+ +Reported-by: Ravi Bangoria +Signed-off-by: Peter Zijlstra (Intel) +Reviewed-by: Ravi Bangoria +Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org +Signed-off-by: Sasha Levin +--- + kernel/events/core.c | 34 ++++++++++++++++++++++++++++++---- + 1 file changed, 30 insertions(+), 4 deletions(-) + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index cb0c8aa71c98b..dd9ae1cae8e3c 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -5822,6 +5822,9 @@ static int perf_event_set_output(struct perf_event *event, + static int perf_event_set_filter(struct perf_event *event, void __user *arg); + static int perf_copy_attr(struct perf_event_attr __user *uattr, + struct perf_event_attr *attr); ++static int __perf_event_set_bpf_prog(struct perf_event *event, ++ struct bpf_prog *prog, ++ u64 bpf_cookie); + + static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) + { +@@ -5890,7 +5893,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon + if (IS_ERR(prog)) + return PTR_ERR(prog); + +- err = perf_event_set_bpf_prog(event, prog, 0); ++ err = __perf_event_set_bpf_prog(event, prog, 0); + if (err) { + bpf_prog_put(prog); + return err; +@@ -10360,8 +10363,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event) + return false; + } + +-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, +- u64 bpf_cookie) ++static int __perf_event_set_bpf_prog(struct perf_event *event, ++ struct bpf_prog *prog, ++ u64 bpf_cookie) + { + bool is_kprobe, is_tracepoint, is_syscall_tp; + +@@ -10395,6 +10399,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, + return perf_event_attach_bpf_prog(event, prog, bpf_cookie); + } + ++int perf_event_set_bpf_prog(struct perf_event *event, ++ struct bpf_prog *prog, ++ u64 bpf_cookie) ++{ ++ struct perf_event_context *ctx; ++ int ret; ++ ++ ctx = perf_event_ctx_lock(event); ++ ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie); ++ perf_event_ctx_unlock(event, ctx); ++ ++ return ret; ++} ++ + void perf_event_free_bpf_prog(struct perf_event *event) + { + if (!perf_event_is_tracing(event)) { +@@ -10414,7 +10432,15 @@ static void perf_event_free_filter(struct perf_event *event) + { + } + +-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, ++static int __perf_event_set_bpf_prog(struct perf_event *event, ++ struct bpf_prog *prog, ++ u64 bpf_cookie) ++{ ++ return -ENOENT; ++} ++ ++int perf_event_set_bpf_prog(struct perf_event *event, ++ struct bpf_prog *prog, + u64 bpf_cookie) + { + return -ENOENT; +-- +2.39.5 + diff --git a/queue-5.15/series b/queue-5.15/series index 6e0409b247..a2d4a311df 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -162,3 +162,6 @@ net_sched-red-fix-a-race-in-__red_change.patch net_sched-tbf-fix-a-race-in-tbf_change.patch sch_ets-make-est_qlen_notify-idempotent.patch net_sched-ets-fix-a-race-in-ets_qdisc_change.patch +fs-filesystems-fix-potential-unsigned-integer-underf.patch +nvmet-fcloop-access-fcpreq-only-when-holding-reqlock.patch +perf-ensure-bpf_perf_link-path-is-properly-serialize.patch