git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Apr 2024 11:37:39 +0000 (13:37 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Apr 2024 11:37:39 +0000 (13:37 +0200)
added patches:
mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch
nvme-fix-miss-command-type-check.patch

queue-6.1/mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch [new file with mode: 0644]
queue-6.1/nvme-fix-miss-command-type-check.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch b/queue-6.1/mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch
new file mode 100644 (file)
index 0000000..c46ba57
--- /dev/null
@@ -0,0 +1,61 @@
+From 65291dcfcf8936e1b23cfd7718fdfde7cfaf7706 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Tue, 26 Mar 2024 15:32:08 +0100
+Subject: mm/secretmem: fix GUP-fast succeeding on secretmem folios
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 65291dcfcf8936e1b23cfd7718fdfde7cfaf7706 upstream.
+
+folio_is_secretmem() currently relies on secretmem folios being LRU
+folios, to save some cycles.
+
+However, folios might reside in a folio batch without the LRU flag set, or
+temporarily have their LRU flag cleared.  Consequently, the LRU flag is
+unreliable for this purpose.
+
+In particular, this is the case when secretmem_fault() allocates a fresh
+page and calls filemap_add_folio()->folio_add_lru().  The folio might be
+added to the per-cpu folio batch and won't get the LRU flag set until the
+batch is drained using, e.g., lru_add_drain().
+
+Consequently, folio_is_secretmem() might not detect secretmem folios, and
+GUP-fast can succeed in grabbing a secretmem folio, crashing the kernel
+when we later try to read from or write to the folio, because the folio
+has been unmapped from the direct map.
+
+Fix it by removing that unreliable check.
+
+Link: https://lkml.kernel.org/r/20240326143210.291116-2-david@redhat.com
+Fixes: 1507f51255c9 ("mm: introduce memfd_secret system call to create "secret" memory areas")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: xingwei lee <xrivendell7@gmail.com>
+Reported-by: yue sun <samsun1006219@gmail.com>
+Closes: https://lore.kernel.org/lkml/CABOYnLyevJeravW=QrH0JUPYEcDN160aZFb7kwndm-J2rmz0HQ@mail.gmail.com/
+Debugged-by: Miklos Szeredi <miklos@szeredi.hu>
+Tested-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/secretmem.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/secretmem.h
++++ b/include/linux/secretmem.h
+@@ -14,10 +14,10 @@ static inline bool page_is_secretmem(str
+        * Using page_mapping() is quite slow because of the actual call
+        * instruction and repeated compound_head(page) inside the
+        * page_mapping() function.
+-       * We know that secretmem pages are not compound and LRU so we can
++       * We know that secretmem pages are not compound, so we can
+        * save a couple of cycles here.
+        */
+-      if (PageCompound(page) || !PageLRU(page))
++      if (PageCompound(page))
+               return false;
+       mapping = (struct address_space *)
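For orientation, since the hunk above is cut short by the web export: below is a sketch of roughly what the 6.1 page_is_secretmem() helper looks like once the PageLRU test is dropped. It is reconstructed from the visible context rather than copied verbatim from the tree, so treat the lines beyond the shown hunk as approximate.

static inline bool page_is_secretmem(struct page *page)
{
	struct address_space *mapping;

	/*
	 * Using page_mapping() is quite slow because of the actual call
	 * instruction and repeated compound_head(page) inside the
	 * page_mapping() function.
	 * We know that secretmem pages are not compound, so we can
	 * save a couple of cycles here.
	 */
	if (PageCompound(page))
		return false;

	/* Read page->mapping and strip the low flag bits (anon/movable). */
	mapping = (struct address_space *)
		((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);

	/* NULL mapping, or flag bits were set: not a secretmem page. */
	if (!mapping || mapping != page->mapping)
		return false;

	/* Secretmem pages are identified by their address_space ops. */
	return mapping->a_ops == &secretmem_aops;
}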
diff --git a/queue-6.1/nvme-fix-miss-command-type-check.patch b/queue-6.1/nvme-fix-miss-command-type-check.patch
new file mode 100644 (file)
index 0000000..206f67f
--- /dev/null
@@ -0,0 +1,99 @@
+From 31a5978243d24d77be4bacca56c78a0fbc43b00d Mon Sep 17 00:00:00 2001
+From: "min15.li" <min15.li@samsung.com>
+Date: Fri, 26 May 2023 17:06:56 +0000
+Subject: nvme: fix miss command type check
+
+From: min15.li <min15.li@samsung.com>
+
+commit 31a5978243d24d77be4bacca56c78a0fbc43b00d upstream.
+
+In nvme_passthru_end(), only the value of the command opcode is checked,
+without checking the command type (I/O command or Admin command). When we
+send a Dataset Management command (whose opcode is the same as that of
+the Set Features command), the kernel treats it as a Set Features
+command, sets the controller's keep-alive interval, and calls
+nvme_keep_alive_work().
+
+Fix it by passing the namespace to nvme_passthru_end() and returning
+early for I/O commands, so that only Admin commands reach the
+opcode-specific handling.
+
+Signed-off-by: min15.li <min15.li@samsung.com>
+Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Fixes: b58da2d270db ("nvme: update keep alive interval when kato is modified")
+Signed-off-by: Tokunori Ikegami <ikegami.t@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/core.c       |    4 +++-
+ drivers/nvme/host/ioctl.c      |    3 ++-
+ drivers/nvme/host/nvme.h       |    2 +-
+ drivers/nvme/target/passthru.c |    3 ++-
+ 4 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1151,7 +1151,7 @@ static u32 nvme_passthru_start(struct nv
+       return effects;
+ }
+-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
++void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+                      struct nvme_command *cmd, int status)
+ {
+       if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+@@ -1167,6 +1167,8 @@ void nvme_passthru_end(struct nvme_ctrl
+               nvme_queue_scan(ctrl);
+               flush_work(&ctrl->scan_work);
+       }
++      if (ns)
++              return;
+       switch (cmd->common.opcode) {
+       case nvme_admin_set_features:
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -147,6 +147,7 @@ static int nvme_submit_user_cmd(struct r
+               unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+               u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+ {
++      struct nvme_ns *ns = q->queuedata;
+       struct nvme_ctrl *ctrl;
+       struct request *req;
+       void *meta = NULL;
+@@ -181,7 +182,7 @@ static int nvme_submit_user_cmd(struct r
+       blk_mq_free_request(req);
+       if (effects)
+-              nvme_passthru_end(ctrl, effects, cmd, ret);
++              nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+       return ret;
+ }
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -1063,7 +1063,7 @@ static inline void nvme_auth_free(struct
+ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+                        u8 opcode);
+ int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
++void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+                      struct nvme_command *cmd, int status);
+ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -216,6 +216,7 @@ static void nvmet_passthru_execute_cmd_w
+       struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+       struct request *rq = req->p.rq;
+       struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
++      struct nvme_ns *ns = rq->q->queuedata;
+       u32 effects;
+       int status;
+@@ -242,7 +243,7 @@ static void nvmet_passthru_execute_cmd_w
+       blk_mq_free_request(rq);
+       if (effects)
+-              nvme_passthru_end(ctrl, effects, req->cmd, status);
++              nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
+ }
+ static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
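To see why passing the namespace fixes the opcode aliasing: the ioctl and target paths above take ns from q->queuedata, which the driver sets only on namespace (I/O) queues, so a non-NULL ns marks an I/O passthrough command, and I/O opcodes overlap the admin opcodes matched in nvme_passthru_end() (Dataset Management and admin Set Features are both 0x09). Below is a condensed sketch of the function after this patch; the queue-unfreeze/rescan handling and the KATO feature-ID check are elided for brevity.

void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{
	/* ... CSE mutex release, queue unfreeze and namespace rescan elided ... */

	/*
	 * Only admin commands may reach the opcode switch: an I/O command
	 * such as Dataset Management would otherwise be mistaken for the
	 * admin Set Features command, which shares its opcode.
	 */
	if (ns)
		return;

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		/* Keep the host keep-alive timer in sync with a KATO change. */
		nvme_update_keep_alive(ctrl, cmd);
		break;
	default:
		break;
	}
}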
diff --git a/queue-6.1/series b/queue-6.1/series
index 575ff602aa13534f4741ee3d7a6d9728349052f5..89f02b9790a865114937988431c6a10067649575 100644 (file)
@@ -134,3 +134,5 @@ smb-client-fix-potential-uaf-in-cifs_signal_cifsd_for_reconnect.patch
 selftests-mptcp-join-fix-dev-in-check_endpoint.patch
 mptcp-don-t-account-accept-of-non-mpc-client-as-fallback-to-tcp.patch
 selftests-mptcp-display-simult-in-extra_msg.patch
+mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch
+nvme-fix-miss-command-type-check.patch