--- /dev/null
+From d2be9ea9a75550a35c5127a6c2633658bc38c76b Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Tue, 29 Jul 2025 19:37:12 +0900
+Subject: ata: libata-scsi: Return aborted command when missing sense and result TF
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit d2be9ea9a75550a35c5127a6c2633658bc38c76b upstream.
+
+ata_gen_ata_sense() is always called for a failed qc that is missing
+sense data, so that a sense key, code and code qualifier can be
+generated with ata_to_sense_error() from the status and error fields of
+the qc result task file. However, if the qc does not have its result
+task file filled, ata_gen_ata_sense() returns early without setting a
+sense key.
+
+Improve this by defaulting to returning ABORTED COMMAND without any
+additional sense code, since we do not know the reason for the failure.
+The same fix is also applied in ata_gen_passthru_sense() with the
+additional check that the qc failed (qc->err_mask is set).
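+
+For illustration only (not part of this patch): a minimal userspace
+sketch of the fixed-format sense data that the new ABORTED COMMAND
+fallback reports. build_fixed_sense() is a hypothetical stand-in for
+the kernel's scsi_build_sense_buffer(), which ata_scsi_set_sense()
+wraps:
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <string.h>
+
+  /* Fixed-format sense data per SPC: byte 0 is the response code,
+   * byte 2 carries the sense key, bytes 12/13 the ASC/ASCQ.
+   */
+  static void build_fixed_sense(uint8_t *buf, uint8_t key,
+                                uint8_t asc, uint8_t ascq)
+  {
+          memset(buf, 0, 18);
+          buf[0] = 0x70;          /* current error, fixed format */
+          buf[2] = key & 0x0f;    /* sense key */
+          buf[7] = 10;            /* additional sense length */
+          buf[12] = asc;
+          buf[13] = ascq;
+  }
+
+  int main(void)
+  {
+          uint8_t sb[18];
+          int i;
+
+          /* ABORTED COMMAND (0x0b), no additional sense code. */
+          build_fixed_sense(sb, 0x0b, 0, 0);
+          for (i = 0; i < 18; i++)
+                  printf("%02x ", sb[i]);
+          printf("\n");
+          return 0;
+  }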
+
+Fixes: 816be86c7993 ("ata: libata-scsi: Check ATA_QCFLAG_RTF_FILLED before using result_tf")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-scsi.c | 27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -934,6 +934,8 @@ static void ata_gen_passthru_sense(struc
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate ATA PT sense data\n");
++ if (qc->err_mask)
++ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ return;
+ }
+
+@@ -991,8 +993,8 @@ static void ata_gen_ata_sense(struct ata
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+- "missing result TF: can't generate sense data\n");
+- return;
++ "Missing result TF: reporting aborted command\n");
++ goto aborted;
+ }
+
+ /* Use ata_to_sense_error() to map status register bits
+@@ -1003,19 +1005,20 @@ static void ata_gen_ata_sense(struct ata
+ ata_to_sense_error(tf->status, tf->error,
+ &sense_key, &asc, &ascq);
+ ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
+- } else {
+- /* Could not decode error */
+- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
+- tf->status, qc->err_mask);
+- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+- return;
+- }
+
+- block = ata_tf_read_block(&qc->result_tf, dev);
+- if (block == U64_MAX)
++ block = ata_tf_read_block(&qc->result_tf, dev);
++ if (block != U64_MAX)
++ scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE,
++ block);
+ return;
++ }
+
+- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
++ /* Could not decode error */
++ ata_dev_warn(dev,
++ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
++ tf->error, tf->status, qc->err_mask);
++aborted:
++ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ }
+
+ void ata_scsi_sdev_config(struct scsi_device *sdev)
--- /dev/null
+From 41b70df5b38bc80967d2e0ed55cc3c3896bba781 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 12 Aug 2025 08:30:11 -0600
+Subject: io_uring/net: commit partial buffers on retry
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 41b70df5b38bc80967d2e0ed55cc3c3896bba781 upstream.
+
+Ring provided buffers are potentially only valid within the single
+execution context in which they were acquired. io_uring deals with this
+and invalidates them on retry. But on the networking side, if
+MSG_WAITALL is set, or if the socket is of the streaming type and too
+little was processed, then it will hang on to the buffer rather than
+recycle or commit it. This is problematic for two reasons:
+
+1) If someone unregisters the provided buffer ring before a later retry,
+ then the req->buf_list will no longer be valid.
+
+2) If multiple sockets are using the same buffer group, then multiple
+ receives can consume the same memory. This can cause data corruption
+ in the application, as either receive could land in the same
+ userspace buffer.
+
+Fix this by disallowing partial retries from pinning a ring provided
+buffer across multiple executions: any partially consumed buffer is
+committed up front before returning -EAGAIN.
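+
+Concretely, every retry path now funnels through one helper. A sketch
+of the pattern, using the names introduced by this patch (kernel
+internal code, shown here only to illustrate the flow):
+
+  if (ret > 0 && io_net_retry(sock, flags)) {
+          sr->done_io += ret;
+          /* Commit what was consumed so no other request can be
+           * handed the same buffers, mark the buffer list as
+           * non-recyclable, and ask for a retry.
+           */
+          return io_net_kbuf_recyle(req, kmsg, ret);
+  }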
+
+Cc: stable@vger.kernel.org
+Reported-by: pt x <superman.xpt@gmail.com>
+Fixes: c56e022c0a27 ("io_uring: add support for user mapped provided buffer ring")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c | 27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -498,6 +498,15 @@ static int io_bundle_nbufs(struct io_asy
+ return nbufs;
+ }
+
++static int io_net_kbuf_recyle(struct io_kiocb *req,
++ struct io_async_msghdr *kmsg, int len)
++{
++ req->flags |= REQ_F_BL_NO_RECYCLE;
++ if (req->flags & REQ_F_BUFFERS_COMMIT)
++ io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
++ return -EAGAIN;
++}
++
+ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
+ struct io_async_msghdr *kmsg,
+ unsigned issue_flags)
+@@ -566,8 +575,7 @@ int io_sendmsg(struct io_kiocb *req, uns
+ kmsg->msg.msg_controllen = 0;
+ kmsg->msg.msg_control = NULL;
+ sr->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+@@ -664,8 +672,7 @@ retry_bundle:
+ sr->len -= ret;
+ sr->buf += ret;
+ sr->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+@@ -1068,8 +1075,7 @@ retry_multishot:
+ }
+ if (ret > 0 && io_net_retry(sock, flags)) {
+ sr->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+@@ -1211,8 +1217,7 @@ retry_multishot:
+ sr->len -= ret;
+ sr->buf += ret;
+ sr->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+@@ -1441,8 +1446,7 @@ int io_send_zc(struct io_kiocb *req, uns
+ zc->len -= ret;
+ zc->buf += ret;
+ zc->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+@@ -1502,8 +1506,7 @@ int io_sendmsg_zc(struct io_kiocb *req,
+
+ if (ret > 0 && io_net_retry(sock, flags)) {
+ sr->done_io += ret;
+- req->flags |= REQ_F_BL_NO_RECYCLE;
+- return -EAGAIN;
++ return io_net_kbuf_recyle(req, kmsg, ret);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
--- /dev/null
+From f0c6eab5e45c529f449fbc595873719e00de6d79 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <arighi@nvidia.com>
+Date: Tue, 25 Mar 2025 10:32:12 +0100
+Subject: sched_ext: initialize built-in idle state before ops.init()
+
+From: Andrea Righi <arighi@nvidia.com>
+
+commit f0c6eab5e45c529f449fbc595873719e00de6d79 upstream.
+
+A BPF scheduler may want to use the built-in idle cpumasks in ops.init()
+before the scheduler is fully initialized, either directly or, for
+example, via a BPF timer.
+
+However, this would result in an error, since the idle state has not
+been properly initialized yet.
+
+This can be easily verified by modifying scx_simple to call
+scx_bpf_get_idle_cpumask() in ops.init():
+
+$ sudo scx_simple
+
+DEBUG DUMP
+===========================================================================
+
+scx_simple[121] triggered exit kind 1024:
+ runtime error (built-in idle tracking is disabled)
+...
+
+Fix this by properly initializing the idle state before ops.init() is
+called. With this change applied:
+
+$ sudo scx_simple
+local=2 global=0
+local=19 global=11
+local=23 global=11
+...
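+
+For reference, a sketch of the scx_simple modification described above
+(simple_init() and SHARED_DSQ are scx_simple's existing init callback
+and DSQ id; only the idle cpumask calls are added):
+
+  s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
+  {
+          const struct cpumask *idle;
+
+          /* Without this fix, the built-in idle state is not yet
+           * initialized here and this call aborts the scheduler with
+           * "built-in idle tracking is disabled".
+           */
+          idle = scx_bpf_get_idle_cpumask();
+          scx_bpf_put_idle_cpumask(idle);
+
+          return scx_bpf_create_dsq(SHARED_DSQ, -1);
+  }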
+
+Fixes: d73249f88743d ("sched_ext: idle: Make idle static keys private")
+Signed-off-by: Andrea Righi <arighi@nvidia.com>
+Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+[ Backport to 6.12:
+ - Original commit doesn't apply cleanly to 6.12 since d73249f88743d is
+ not present.
+ - This backport applies the same logical fix to prevent BPF scheduler
+ failures while accessing idle cpumasks from ops.init(). ]
+Signed-off-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -5220,6 +5220,13 @@ static int scx_ops_enable(struct sched_e
+ for_each_possible_cpu(cpu)
+ cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
+
++ if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
++ reset_idle_masks();
++ static_branch_enable(&scx_builtin_idle_enabled);
++ } else {
++ static_branch_disable(&scx_builtin_idle_enabled);
++ }
++
+ /*
+ * Keep CPUs stable during enable so that the BPF scheduler can track
+ * online CPUs by watching ->on/offline_cpu() after ->init().
+@@ -5287,13 +5294,6 @@ static int scx_ops_enable(struct sched_e
+ if (scx_ops.cpu_acquire || scx_ops.cpu_release)
+ static_branch_enable(&scx_ops_cpu_preempt);
+
+- if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+- reset_idle_masks();
+- static_branch_enable(&scx_builtin_idle_enabled);
+- } else {
+- static_branch_disable(&scx_builtin_idle_enabled);
+- }
+-
+ /*
+ * Lock out forks, cgroup on/offlining and moves before opening the
+ * floodgate so that they don't wander into the operations prematurely.
btrfs-send-use-fallocate-for-hole-punching-with-send-stream-v2.patch
btrfs-send-make-fs_path_len-inline-and-constify-its-argument.patch
netfs-fix-unbuffered-write-error-handling.patch
+io_uring-net-commit-partial-buffers-on-retry.patch
+ata-libata-scsi-return-aborted-command-when-missing-sense-and-result-tf.patch
+sched_ext-initialize-built-in-idle-state-before-ops.init.patch