git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.3-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Jun 2023 20:07:13 +0000 (22:07 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Jun 2023 20:07:13 +0000 (22:07 +0200)
added patches:
afs-fix-dangling-folio-ref-counts-in-writeback.patch
afs-fix-waiting-for-writeback-then-skipping-folio.patch
ksmbd-fix-out-of-bound-read-in-smb2_write.patch
ksmbd-validate-command-payload-size.patch
ksmbd-validate-session-id-and-tree-id-in-the-compound-request.patch
revert-efi-random-refresh-non-volatile-random-seed-when-rng-is-initialized.patch
tick-common-align-tick-period-during-sched_timer-setup.patch
udmabuf-revert-add-support-for-mapping-hugepages-v4.patch

queue-6.3/afs-fix-dangling-folio-ref-counts-in-writeback.patch [new file with mode: 0644]
queue-6.3/afs-fix-waiting-for-writeback-then-skipping-folio.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-out-of-bound-read-in-smb2_write.patch [new file with mode: 0644]
queue-6.3/ksmbd-validate-command-payload-size.patch [new file with mode: 0644]
queue-6.3/ksmbd-validate-session-id-and-tree-id-in-the-compound-request.patch [new file with mode: 0644]
queue-6.3/revert-efi-random-refresh-non-volatile-random-seed-when-rng-is-initialized.patch [new file with mode: 0644]
queue-6.3/series
queue-6.3/tick-common-align-tick-period-during-sched_timer-setup.patch [new file with mode: 0644]
queue-6.3/udmabuf-revert-add-support-for-mapping-hugepages-v4.patch [new file with mode: 0644]

diff --git a/queue-6.3/afs-fix-dangling-folio-ref-counts-in-writeback.patch b/queue-6.3/afs-fix-dangling-folio-ref-counts-in-writeback.patch
new file mode 100644 (file)
index 0000000..c752623
--- /dev/null
@@ -0,0 +1,41 @@
+From a2b6f2ab3e144f8e23666aafeba0e4d9ea4b7975 Mon Sep 17 00:00:00 2001
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Date: Wed, 7 Jun 2023 13:41:19 -0700
+Subject: afs: Fix dangling folio ref counts in writeback
+
+From: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+
+commit a2b6f2ab3e144f8e23666aafeba0e4d9ea4b7975 upstream.
+
+Commit acc8d8588cb7 converted afs_writepages_region() to write back a
+folio batch. If writeback needs rescheduling, the function exits without
+dropping the references to the folios in fbatch. This patch fixes that.
+
+[DH: Moved the added line before the _leave()]
+
+Fixes: acc8d8588cb7 ("afs: convert afs_writepages_region() to use filemap_get_folios_tag()")
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Link: https://lore.kernel.org/r/20230607204120.89416-1-vishal.moola@gmail.com/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/afs/write.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index c822d6006033..fd433024070e 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -763,6 +763,7 @@ static int afs_writepages_region(struct address_space *mapping,
+                               if (wbc->sync_mode == WB_SYNC_NONE) {
+                                       if (skips >= 5 || need_resched()) {
+                                               *_next = start;
++                                              folio_batch_release(&fbatch);
+                                               _leave(" = 0 [%llx]", *_next);
+                                               return 0;
+                                       }
+-- 
+2.41.0
+
diff --git a/queue-6.3/afs-fix-waiting-for-writeback-then-skipping-folio.patch b/queue-6.3/afs-fix-waiting-for-writeback-then-skipping-folio.patch
new file mode 100644 (file)
index 0000000..5328e29
--- /dev/null
@@ -0,0 +1,55 @@
+From 819da022dd007398d0c42ebcd8dbb1b681acea53 Mon Sep 17 00:00:00 2001
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Date: Wed, 7 Jun 2023 13:41:20 -0700
+Subject: afs: Fix waiting for writeback then skipping folio
+
+From: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+
+commit 819da022dd007398d0c42ebcd8dbb1b681acea53 upstream.
+
+Commit acc8d8588cb7 converted afs_writepages_region() to write back a
+folio batch. The function waits for writeback to a folio, but then
+proceeds to the rest of the batch without trying to write that folio
+again. This patch fixes that by having it attempt to write the folio again.
+
+[DH: Also remove an 'else' that adding a goto makes redundant]
+
+Fixes: acc8d8588cb7 ("afs: convert afs_writepages_region() to use filemap_get_folios_tag()")
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Link: https://lore.kernel.org/r/20230607204120.89416-2-vishal.moola@gmail.com/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/afs/write.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index fd433024070e..8750b99c3f56 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -731,6 +731,7 @@ static int afs_writepages_region(struct address_space *mapping,
+                        * (changing page->mapping to NULL), or even swizzled
+                        * back from swapper_space to tmpfs file mapping
+                        */
++try_again:
+                       if (wbc->sync_mode != WB_SYNC_NONE) {
+                               ret = folio_lock_killable(folio);
+                               if (ret < 0) {
+@@ -757,9 +758,10 @@ static int afs_writepages_region(struct address_space *mapping,
+ #ifdef CONFIG_AFS_FSCACHE
+                                       folio_wait_fscache(folio);
+ #endif
+-                              } else {
+-                                      start += folio_size(folio);
++                                      goto try_again;
+                               }
++
++                              start += folio_size(folio);
+                               if (wbc->sync_mode == WB_SYNC_NONE) {
+                                       if (skips >= 5 || need_resched()) {
+                                               *_next = start;
+-- 
+2.41.0
+
diff --git a/queue-6.3/ksmbd-fix-out-of-bound-read-in-smb2_write.patch b/queue-6.3/ksmbd-fix-out-of-bound-read-in-smb2_write.patch
new file mode 100644 (file)
index 0000000..f41a8c2
--- /dev/null
@@ -0,0 +1,44 @@
+From 5fe7f7b78290638806211046a99f031ff26164e1 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 15 Jun 2023 22:04:40 +0900
+Subject: ksmbd: fix out-of-bound read in smb2_write
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 5fe7f7b78290638806211046a99f031ff26164e1 upstream.
+
+ksmbd_smb2_check_message doesn't validate hdr->NextCommand. If
+->NextCommand is bigger than Offset + Length of smb2 write, it will
+allow oversized smb2 write length. It will cause OOB read in smb2_write.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-21164
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2misc.c |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -351,10 +351,16 @@ int ksmbd_smb2_check_message(struct ksmb
+       int command;
+       __u32 clc_len;  /* calculated length */
+       __u32 len = get_rfc1002_len(work->request_buf);
+-      __u32 req_struct_size;
++      __u32 req_struct_size, next_cmd = le32_to_cpu(hdr->NextCommand);
+-      if (le32_to_cpu(hdr->NextCommand) > 0)
+-              len = le32_to_cpu(hdr->NextCommand);
++      if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) {
++              pr_err("next command(%u) offset exceeds smb msg size\n",
++                              next_cmd);
++              return 1;
++      }
++
++      if (next_cmd > 0)
++              len = next_cmd;
+       else if (work->next_smb2_rcv_hdr_off)
+               len -= work->next_smb2_rcv_hdr_off;
diff --git a/queue-6.3/ksmbd-validate-command-payload-size.patch b/queue-6.3/ksmbd-validate-command-payload-size.patch
new file mode 100644 (file)
index 0000000..385070a
--- /dev/null
@@ -0,0 +1,86 @@
+From 2b9b8f3b68edb3d67d79962f02e26dbb5ae3808d Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Mon, 5 Jun 2023 01:57:34 +0900
+Subject: ksmbd: validate command payload size
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 2b9b8f3b68edb3d67d79962f02e26dbb5ae3808d upstream.
+
+->StructureSize2 indicates command payload size. ksmbd should validate
+this size with rfc1002 length before accessing it.
+This patch removes an unneeded check and adds the validation for this.
+
+[    8.912583] BUG: KASAN: slab-out-of-bounds in ksmbd_smb2_check_message+0x12a/0xc50
+[    8.913051] Read of size 2 at addr ffff88800ac7d92c by task kworker/0:0/7
+...
+[    8.914967] Call Trace:
+[    8.915126]  <TASK>
+[    8.915267]  dump_stack_lvl+0x33/0x50
+[    8.915506]  print_report+0xcc/0x620
+[    8.916558]  kasan_report+0xae/0xe0
+[    8.917080]  kasan_check_range+0x35/0x1b0
+[    8.917334]  ksmbd_smb2_check_message+0x12a/0xc50
+[    8.917935]  ksmbd_verify_smb_message+0xae/0xd0
+[    8.918223]  handle_ksmbd_work+0x192/0x820
+[    8.918478]  process_one_work+0x419/0x760
+[    8.918727]  worker_thread+0x2a2/0x6f0
+[    8.919222]  kthread+0x187/0x1d0
+[    8.919723]  ret_from_fork+0x1f/0x30
+[    8.919954]  </TASK>
+
+Cc: stable@vger.kernel.org
+Reported-by: Chih-Yen Chang <cc85nod@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2misc.c |   23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -351,6 +351,7 @@ int ksmbd_smb2_check_message(struct ksmb
+       int command;
+       __u32 clc_len;  /* calculated length */
+       __u32 len = get_rfc1002_len(work->request_buf);
++      __u32 req_struct_size;
+       if (le32_to_cpu(hdr->NextCommand) > 0)
+               len = le32_to_cpu(hdr->NextCommand);
+@@ -373,17 +374,9 @@ int ksmbd_smb2_check_message(struct ksmb
+       }
+       if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+-              if (command != SMB2_OPLOCK_BREAK_HE &&
+-                  (hdr->Status == 0 || pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
+-                      /* error packets have 9 byte structure size */
+-                      ksmbd_debug(SMB,
+-                                  "Illegal request size %u for command %d\n",
+-                                  le16_to_cpu(pdu->StructureSize2), command);
+-                      return 1;
+-              } else if (command == SMB2_OPLOCK_BREAK_HE &&
+-                         hdr->Status == 0 &&
+-                         le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
+-                         le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
++              if (command == SMB2_OPLOCK_BREAK_HE &&
++                  le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
++                  le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
+                       /* special case for SMB2.1 lease break message */
+                       ksmbd_debug(SMB,
+                                   "Illegal request size %d for oplock break\n",
+@@ -392,6 +385,14 @@ int ksmbd_smb2_check_message(struct ksmb
+               }
+       }
++      req_struct_size = le16_to_cpu(pdu->StructureSize2) +
++              __SMB2_HEADER_STRUCTURE_SIZE;
++      if (command == SMB2_LOCK_HE)
++              req_struct_size -= sizeof(struct smb2_lock_element);
++
++      if (req_struct_size > len + 1)
++              return 1;
++
+       if (smb2_calc_size(hdr, &clc_len))
+               return 1;
diff --git a/queue-6.3/ksmbd-validate-session-id-and-tree-id-in-the-compound-request.patch b/queue-6.3/ksmbd-validate-session-id-and-tree-id-in-the-compound-request.patch
new file mode 100644 (file)
index 0000000..ca6b7f3
--- /dev/null
@@ -0,0 +1,156 @@
+From 5005bcb4219156f1bf7587b185080ec1da08518e Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 15 Jun 2023 22:05:29 +0900
+Subject: ksmbd: validate session id and tree id in the compound request
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 5005bcb4219156f1bf7587b185080ec1da08518e upstream.
+
+This patch validates session id and tree id in the compound request.
+If the first operation in the compound is an SMB2 ECHO request, ksmbd
+bypasses session and tree validation, so work->sess and work->tcon could
+be NULL. If the second request in the compound accesses work->sess or
+tcon, it causes a NULL pointer dereferencing error.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-21165
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/server.c  |   33 ++++++++++++++++++++-------------
+ fs/ksmbd/smb2pdu.c |   44 +++++++++++++++++++++++++++++++++++++++-----
+ 2 files changed, 59 insertions(+), 18 deletions(-)
+
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -185,24 +185,31 @@ static void __handle_ksmbd_work(struct k
+               goto send;
+       }
+-      if (conn->ops->check_user_session) {
+-              rc = conn->ops->check_user_session(work);
+-              if (rc < 0) {
+-                      command = conn->ops->get_cmd_val(work);
+-                      conn->ops->set_rsp_status(work,
+-                                      STATUS_USER_SESSION_DELETED);
+-                      goto send;
+-              } else if (rc > 0) {
+-                      rc = conn->ops->get_ksmbd_tcon(work);
++      do {
++              if (conn->ops->check_user_session) {
++                      rc = conn->ops->check_user_session(work);
+                       if (rc < 0) {
+-                              conn->ops->set_rsp_status(work,
+-                                      STATUS_NETWORK_NAME_DELETED);
++                              if (rc == -EINVAL)
++                                      conn->ops->set_rsp_status(work,
++                                              STATUS_INVALID_PARAMETER);
++                              else
++                                      conn->ops->set_rsp_status(work,
++                                              STATUS_USER_SESSION_DELETED);
+                               goto send;
++                      } else if (rc > 0) {
++                              rc = conn->ops->get_ksmbd_tcon(work);
++                              if (rc < 0) {
++                                      if (rc == -EINVAL)
++                                              conn->ops->set_rsp_status(work,
++                                                      STATUS_INVALID_PARAMETER);
++                                      else
++                                              conn->ops->set_rsp_status(work,
++                                                      STATUS_NETWORK_NAME_DELETED);
++                                      goto send;
++                              }
+                       }
+               }
+-      }
+-      do {
+               rc = __process_request(work, conn, &command);
+               if (rc == SERVER_HANDLER_ABORT)
+                       break;
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -91,7 +91,6 @@ int smb2_get_ksmbd_tcon(struct ksmbd_wor
+       unsigned int cmd = le16_to_cpu(req_hdr->Command);
+       int tree_id;
+-      work->tcon = NULL;
+       if (cmd == SMB2_TREE_CONNECT_HE ||
+           cmd ==  SMB2_CANCEL_HE ||
+           cmd ==  SMB2_LOGOFF_HE) {
+@@ -105,10 +104,28 @@ int smb2_get_ksmbd_tcon(struct ksmbd_wor
+       }
+       tree_id = le32_to_cpu(req_hdr->Id.SyncId.TreeId);
++
++      /*
++       * If request is not the first in Compound request,
++       * Just validate tree id in header with work->tcon->id.
++       */
++      if (work->next_smb2_rcv_hdr_off) {
++              if (!work->tcon) {
++                      pr_err("The first operation in the compound does not have tcon\n");
++                      return -EINVAL;
++              }
++              if (work->tcon->id != tree_id) {
++                      pr_err("tree id(%u) is different with id(%u) in first operation\n",
++                                      tree_id, work->tcon->id);
++                      return -EINVAL;
++              }
++              return 1;
++      }
++
+       work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
+       if (!work->tcon) {
+               pr_err("Invalid tid %d\n", tree_id);
+-              return -EINVAL;
++              return -ENOENT;
+       }
+       return 1;
+@@ -547,7 +564,6 @@ int smb2_check_user_session(struct ksmbd
+       unsigned int cmd = conn->ops->get_cmd_val(work);
+       unsigned long long sess_id;
+-      work->sess = NULL;
+       /*
+        * SMB2_ECHO, SMB2_NEGOTIATE, SMB2_SESSION_SETUP command do not
+        * require a session id, so no need to validate user session's for
+@@ -558,15 +574,33 @@ int smb2_check_user_session(struct ksmbd
+               return 0;
+       if (!ksmbd_conn_good(conn))
+-              return -EINVAL;
++              return -EIO;
+       sess_id = le64_to_cpu(req_hdr->SessionId);
++
++      /*
++       * If request is not the first in Compound request,
++       * Just validate session id in header with work->sess->id.
++       */
++      if (work->next_smb2_rcv_hdr_off) {
++              if (!work->sess) {
++                      pr_err("The first operation in the compound does not have sess\n");
++                      return -EINVAL;
++              }
++              if (work->sess->id != sess_id) {
++                      pr_err("session id(%llu) is different with the first operation(%lld)\n",
++                                      sess_id, work->sess->id);
++                      return -EINVAL;
++              }
++              return 1;
++      }
++
+       /* Check for validity of user session */
+       work->sess = ksmbd_session_lookup_all(conn, sess_id);
+       if (work->sess)
+               return 1;
+       ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+-      return -EINVAL;
++      return -ENOENT;
+ }
+ static void destroy_previous_session(struct ksmbd_conn *conn,
diff --git a/queue-6.3/revert-efi-random-refresh-non-volatile-random-seed-when-rng-is-initialized.patch b/queue-6.3/revert-efi-random-refresh-non-volatile-random-seed-when-rng-is-initialized.patch
new file mode 100644 (file)
index 0000000..b18016a
--- /dev/null
@@ -0,0 +1,64 @@
+From 69cbeb61ff9093a9155cb19a36d633033f71093a Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 21 Jun 2023 10:58:46 -0700
+Subject: Revert "efi: random: refresh non-volatile random seed when RNG is initialized"
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 69cbeb61ff9093a9155cb19a36d633033f71093a upstream.
+
+This reverts commit e7b813b32a42a3a6281a4fd9ae7700a0257c1d50 (and the
+subsequent fix for it: 41a15855c1ee "efi: random: fix NULL-deref when
+refreshing seed").
+
+It turns out to cause non-deterministic boot stalls on at least a HP
+6730b laptop.
+
+Reported-and-bisected-by: Sami Korkalainen <sami.korkalainen@proton.me>
+Link: https://lore.kernel.org/all/GQUnKz2al3yke5mB2i1kp3SzNHjK8vi6KJEh7rnLrOQ24OrlljeCyeWveLW9pICEmB9Qc8PKdNt3w1t_g3-Uvxq1l8Wj67PpoMeWDoH8PKk=@proton.me/
+Cc: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Bagas Sanjaya <bagasdotme@gmail.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/efi.c |   21 ---------------------
+ 1 file changed, 21 deletions(-)
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -361,24 +361,6 @@ static void __init efi_debugfs_init(void
+ static inline void efi_debugfs_init(void) {}
+ #endif
+-static void refresh_nv_rng_seed(struct work_struct *work)
+-{
+-      u8 seed[EFI_RANDOM_SEED_SIZE];
+-
+-      get_random_bytes(seed, sizeof(seed));
+-      efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
+-                       EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
+-                       EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
+-      memzero_explicit(seed, sizeof(seed));
+-}
+-static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
+-{
+-      static DECLARE_WORK(work, refresh_nv_rng_seed);
+-      schedule_work(&work);
+-      return NOTIFY_DONE;
+-}
+-static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };
+-
+ /*
+  * We register the efi subsystem with the firmware subsystem and the
+  * efivars subsystem with the efi subsystem, if the system was booted with
+@@ -451,9 +433,6 @@ static int __init efisubsys_init(void)
+               platform_device_register_simple("efi_secret", 0, NULL, 0);
+ #endif
+-      if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
+-              execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
+-
+       return 0;
+ err_remove_group:
index 0f9ba3423d2bdfebcfe073d7098fc8aca129e335..d9e6630efaea28585583f08d4530546739ace4a5 100644 (file)
@@ -5,3 +5,11 @@ drm-amd-display-fix-the-system-hang-while-disable-ps.patch
 ata-libata-scsi-avoid-deadlock-on-rescan-after-devic.patch
 mm-fix-copy_from_user_nofault.patch
 tpm-tpm_tis-claim-locality-in-interrupt-handler.patch
+ksmbd-validate-command-payload-size.patch
+ksmbd-fix-out-of-bound-read-in-smb2_write.patch
+ksmbd-validate-session-id-and-tree-id-in-the-compound-request.patch
+udmabuf-revert-add-support-for-mapping-hugepages-v4.patch
+revert-efi-random-refresh-non-volatile-random-seed-when-rng-is-initialized.patch
+afs-fix-dangling-folio-ref-counts-in-writeback.patch
+afs-fix-waiting-for-writeback-then-skipping-folio.patch
+tick-common-align-tick-period-during-sched_timer-setup.patch
diff --git a/queue-6.3/tick-common-align-tick-period-during-sched_timer-setup.patch b/queue-6.3/tick-common-align-tick-period-during-sched_timer-setup.patch
new file mode 100644 (file)
index 0000000..26b077e
--- /dev/null
@@ -0,0 +1,95 @@
+From 13bb06f8dd42071cb9a49f6e21099eea05d4b856 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 15 Jun 2023 11:18:30 +0200
+Subject: tick/common: Align tick period during sched_timer setup
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 13bb06f8dd42071cb9a49f6e21099eea05d4b856 upstream.
+
+The tick period is aligned very early while the first clock_event_device is
+registered. At that point the system runs in periodic mode and switches
+later to one-shot mode if possible.
+
+The next wake-up event is programmed based on the aligned value
+(tick_next_period) but the delta value, that is used to program the
+clock_event_device, is computed based on ktime_get().
+
+With the subtracted offset, the device fires earlier than the exact time
+frame. With a large enough offset the system programs the timer for the
+next wake-up and the remaining time left is too small to make any boot
+progress. The system hangs.
+
+Move the alignment later to the setup of tick_sched timer. At this point
+the system switches to oneshot mode and a high resolution clocksource is
+available. At this point it is safe to align tick_next_period because
+ktime_get() will now return accurate (not jiffies based) time.
+
+[bigeasy: Patch description + testing].
+
+Fixes: e9523a0d81899 ("tick/common: Align tick period with the HZ tick.")
+Reported-by: Mathias Krause <minipli@grsecurity.net>
+Reported-by: "Bhatnagar, Rishabh" <risbhat@amazon.com>
+Suggested-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Richard W.M. Jones <rjones@redhat.com>
+Tested-by: Mathias Krause <minipli@grsecurity.net>
+Acked-by: SeongJae Park <sj@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/5a56290d-806e-b9a5-f37c-f21958b5a8c0@grsecurity.net
+Link: https://lore.kernel.org/12c6f9a3-d087-b824-0d05-0d18c9bc1bf3@amazon.com
+Link: https://lore.kernel.org/r/20230615091830.RxMV2xf_@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/tick-common.c |   13 +------------
+ kernel/time/tick-sched.c  |   13 ++++++++++++-
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -218,19 +218,8 @@ static void tick_setup_device(struct tic
+                * this cpu:
+                */
+               if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
+-                      ktime_t next_p;
+-                      u32 rem;
+-
+                       tick_do_timer_cpu = cpu;
+-
+-                      next_p = ktime_get();
+-                      div_u64_rem(next_p, TICK_NSEC, &rem);
+-                      if (rem) {
+-                              next_p -= rem;
+-                              next_p += TICK_NSEC;
+-                      }
+-
+-                      tick_next_period = next_p;
++                      tick_next_period = ktime_get();
+ #ifdef CONFIG_NO_HZ_FULL
+                       /*
+                        * The boot CPU may be nohz_full, in which case set
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -161,8 +161,19 @@ static ktime_t tick_init_jiffy_update(vo
+       raw_spin_lock(&jiffies_lock);
+       write_seqcount_begin(&jiffies_seq);
+       /* Did we start the jiffies update yet ? */
+-      if (last_jiffies_update == 0)
++      if (last_jiffies_update == 0) {
++              u32 rem;
++
++              /*
++               * Ensure that the tick is aligned to a multiple of
++               * TICK_NSEC.
++               */
++              div_u64_rem(tick_next_period, TICK_NSEC, &rem);
++              if (rem)
++                      tick_next_period += TICK_NSEC - rem;
++
+               last_jiffies_update = tick_next_period;
++      }
+       period = last_jiffies_update;
+       write_seqcount_end(&jiffies_seq);
+       raw_spin_unlock(&jiffies_lock);
diff --git a/queue-6.3/udmabuf-revert-add-support-for-mapping-hugepages-v4.patch b/queue-6.3/udmabuf-revert-add-support-for-mapping-hugepages-v4.patch
new file mode 100644 (file)
index 0000000..93d4c77
--- /dev/null
@@ -0,0 +1,129 @@
+From b7cb3821905b79b6ed474fd5ba34d1e187649139 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 8 Jun 2023 13:49:27 -0700
+Subject: udmabuf: revert 'Add support for mapping hugepages (v4)'
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit b7cb3821905b79b6ed474fd5ba34d1e187649139 upstream.
+
+This effectively reverts commit 16c243e99d33 ("udmabuf: Add support for
+mapping hugepages (v4)").  Recently, Junxiao Chang found a BUG with page
+map counting as described here [1].  This issue pointed out that the
+udmabuf driver was making direct use of subpages of hugetlb pages.  This
+is not a good idea, and no other mm code attempts such use.  In addition
+to the mapcount issue, this also causes issues with hugetlb vmemmap
+optimization and page poisoning.
+
+For now, remove hugetlb support.
+
+If udmabuf wants to be used on hugetlb mappings, it should be changed to
+only use complete hugetlb pages.  This will require different alignment
+and size requirements on the UDMABUF_CREATE API.
+
+[1] https://lore.kernel.org/linux-mm/20230512072036.1027784-1-junxiao.chang@intel.com/
+
+Link: https://lkml.kernel.org/r/20230608204927.88711-1-mike.kravetz@oracle.com
+Fixes: 16c243e99d33 ("udmabuf: Add support for mapping hugepages (v4)")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Dongwon Kim <dongwon.kim@intel.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Jerome Marchand <jmarchan@redhat.com>
+Cc: Junxiao Chang <junxiao.chang@intel.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma-buf/udmabuf.c |   47 +++++-----------------------------------------
+ 1 file changed, 6 insertions(+), 41 deletions(-)
+
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -12,7 +12,6 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/udmabuf.h>
+-#include <linux/hugetlb.h>
+ #include <linux/vmalloc.h>
+ #include <linux/iosys-map.h>
+@@ -207,9 +206,7 @@ static long udmabuf_create(struct miscde
+       struct udmabuf *ubuf;
+       struct dma_buf *buf;
+       pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
+-      struct page *page, *hpage = NULL;
+-      pgoff_t subpgoff, maxsubpgs;
+-      struct hstate *hpstate;
++      struct page *page;
+       int seals, ret = -EINVAL;
+       u32 i, flags;
+@@ -245,7 +242,7 @@ static long udmabuf_create(struct miscde
+               if (!memfd)
+                       goto err;
+               mapping = memfd->f_mapping;
+-              if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
++              if (!shmem_mapping(mapping))
+                       goto err;
+               seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
+               if (seals == -EINVAL)
+@@ -256,48 +253,16 @@ static long udmabuf_create(struct miscde
+                       goto err;
+               pgoff = list[i].offset >> PAGE_SHIFT;
+               pgcnt = list[i].size   >> PAGE_SHIFT;
+-              if (is_file_hugepages(memfd)) {
+-                      hpstate = hstate_file(memfd);
+-                      pgoff = list[i].offset >> huge_page_shift(hpstate);
+-                      subpgoff = (list[i].offset &
+-                                  ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+-                      maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+-              }
+               for (pgidx = 0; pgidx < pgcnt; pgidx++) {
+-                      if (is_file_hugepages(memfd)) {
+-                              if (!hpage) {
+-                                      hpage = find_get_page_flags(mapping, pgoff,
+-                                                                  FGP_ACCESSED);
+-                                      if (!hpage) {
+-                                              ret = -EINVAL;
+-                                              goto err;
+-                                      }
+-                              }
+-                              page = hpage + subpgoff;
+-                              get_page(page);
+-                              subpgoff++;
+-                              if (subpgoff == maxsubpgs) {
+-                                      put_page(hpage);
+-                                      hpage = NULL;
+-                                      subpgoff = 0;
+-                                      pgoff++;
+-                              }
+-                      } else {
+-                              page = shmem_read_mapping_page(mapping,
+-                                                             pgoff + pgidx);
+-                              if (IS_ERR(page)) {
+-                                      ret = PTR_ERR(page);
+-                                      goto err;
+-                              }
++                      page = shmem_read_mapping_page(mapping, pgoff + pgidx);
++                      if (IS_ERR(page)) {
++                              ret = PTR_ERR(page);
++                              goto err;
+                       }
+                       ubuf->pages[pgbuf++] = page;
+               }
+               fput(memfd);
+               memfd = NULL;
+-              if (hpage) {
+-                      put_page(hpage);
+-                      hpage = NULL;
+-              }
+       }
+       exp_info.ops  = &udmabuf_ops;