git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Nov 2023 21:40:20 +0000 (21:40 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Nov 2023 21:40:20 +0000 (21:40 +0000)
added patches:
acpi-fpdt-properly-handle-invalid-fpdt-subtables.patch
arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
ima-annotate-iint-mutex-to-avoid-lockdep-false-positive-warnings.patch
ima-detect-changes-to-the-backing-overlay-file.patch
pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
rcu-tree-defer-setting-of-jiffies-during-stall-reset.patch
svcrdma-drop-connection-after-an-rdma-read-error.patch
wifi-wilc1000-use-vmm_table-as-array-in-wilc-struct.patch

13 files changed:
queue-5.15/acpi-fpdt-properly-handle-invalid-fpdt-subtables.patch [new file with mode: 0644]
queue-5.15/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch [new file with mode: 0644]
queue-5.15/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch [new file with mode: 0644]
queue-5.15/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch [new file with mode: 0644]
queue-5.15/ima-annotate-iint-mutex-to-avoid-lockdep-false-positive-warnings.patch [new file with mode: 0644]
queue-5.15/ima-detect-changes-to-the-backing-overlay-file.patch [new file with mode: 0644]
queue-5.15/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch [new file with mode: 0644]
queue-5.15/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch [new file with mode: 0644]
queue-5.15/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch [new file with mode: 0644]
queue-5.15/rcu-tree-defer-setting-of-jiffies-during-stall-reset.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/svcrdma-drop-connection-after-an-rdma-read-error.patch [new file with mode: 0644]
queue-5.15/wifi-wilc1000-use-vmm_table-as-array-in-wilc-struct.patch [new file with mode: 0644]

diff --git a/queue-5.15/acpi-fpdt-properly-handle-invalid-fpdt-subtables.patch b/queue-5.15/acpi-fpdt-properly-handle-invalid-fpdt-subtables.patch
new file mode 100644 (file)
index 0000000..c32e712
--- /dev/null
@@ -0,0 +1,166 @@
+From a83c68a3bf7c418c9a46693c63c638852b0c1f4e Mon Sep 17 00:00:00 2001
+From: Vasily Khoruzhick <anarsoul@gmail.com>
+Date: Wed, 27 Sep 2023 12:50:02 -0700
+Subject: ACPI: FPDT: properly handle invalid FPDT subtables
+
+From: Vasily Khoruzhick <anarsoul@gmail.com>
+
+commit a83c68a3bf7c418c9a46693c63c638852b0c1f4e upstream.
+
+Buggy BIOSes may have invalid FPDT subtables, e.g. on my hardware:
+
+S3PT subtable:
+
+7F20FE30: 53 33 50 54 24 00 00 00-00 00 00 00 00 00 18 01  *S3PT$...........*
+7F20FE40: 00 00 00 00 00 00 00 00-00 00 00 00 00 00 00 00  *................*
+7F20FE50: 00 00 00 00
+
+Here the first record has zero length.
+
+FBPT subtable:
+
+7F20FE50:             46 42 50 54-3C 00 00 00 46 42 50 54  *....FBPT<...FBPT*
+7F20FE60: 02 00 30 02 00 00 00 00-00 00 00 00 00 00 00 00  *..0.............*
+7F20FE70: 2A A6 BC 6E 0B 00 00 00-1A 44 41 70 0B 00 00 00  **..n.....DAp....*
+7F20FE80: 00 00 00 00 00 00 00 00-00 00 00 00 00 00 00 00  *................*
+
+And here FBPT table has FBPT signature repeated instead of the first
+record.
+
+Current code will be looping indefinitely due to zero length records, so
+break out of the loop if record length is zero.
+
+While we are here, add proper handling for fpdt_process_subtable()
+failures.
+
+Fixes: d1eb86e59be0 ("ACPI: tables: introduce support for FPDT table")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
+[ rjw: Comment edit, added empty code lines ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/acpi_fpdt.c |   45 +++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 37 insertions(+), 8 deletions(-)
+
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 add
+               record_header = (void *)subtable_header + offset;
+               offset += record_header->length;
++              if (!record_header->length) {
++                      pr_err(FW_BUG "Zero-length record found in FPTD.\n");
++                      result = -EINVAL;
++                      goto err;
++              }
++
+               switch (record_header->type) {
+               case RECORD_S3_RESUME:
+                       if (subtable_type != SUBTABLE_S3PT) {
+                               pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+                                    record_header->type, signature);
+-                              return -EINVAL;
++                              result = -EINVAL;
++                              goto err;
+                       }
+                       if (record_resume) {
+                               pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 add
+                       record_resume = (struct resume_performance_record *)record_header;
+                       result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+                       if (result)
+-                              return result;
++                              goto err;
+                       break;
+               case RECORD_S3_SUSPEND:
+                       if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 add
+                       record_suspend = (struct suspend_performance_record *)record_header;
+                       result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+                       if (result)
+-                              return result;
++                              goto err;
+                       break;
+               case RECORD_BOOT:
+                       if (subtable_type != SUBTABLE_FBPT) {
+                               pr_err(FW_BUG "Invalid %d for subtable %s\n",
+                                    record_header->type, signature);
+-                              return -EINVAL;
++                              result = -EINVAL;
++                              goto err;
+                       }
+                       if (record_boot) {
+                               pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 add
+                       record_boot = (struct boot_performance_record *)record_header;
+                       result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+                       if (result)
+-                              return result;
++                              goto err;
+                       break;
+               default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 add
+               }
+       }
+       return 0;
++
++err:
++      if (record_boot)
++              sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++      if (record_suspend)
++              sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++      if (record_resume)
++              sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++      return result;
+ }
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+       struct acpi_table_header *header;
+       struct fpdt_subtable_entry *subtable;
+       u32 offset = sizeof(*header);
++      int result;
+       status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+       fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+       if (!fpdt_kobj) {
+-              acpi_put_table(header);
+-              return -ENOMEM;
++              result = -ENOMEM;
++              goto err_nomem;
+       }
+       while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+               switch (subtable->type) {
+               case SUBTABLE_FBPT:
+               case SUBTABLE_S3PT:
+-                      fpdt_process_subtable(subtable->address,
++                      result = fpdt_process_subtable(subtable->address,
+                                             subtable->type);
++                      if (result)
++                              goto err_subtable;
+                       break;
+               default:
+                       /* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+               offset += sizeof(*subtable);
+       }
+       return 0;
++err_subtable:
++      kobject_put(fpdt_kobj);
++
++err_nomem:
++      acpi_put_table(header);
++      return result;
+ }
+ fs_initcall(acpi_init_fpdt);
diff --git a/queue-5.15/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch b/queue-5.15/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
new file mode 100644 (file)
index 0000000..c182622
--- /dev/null
@@ -0,0 +1,36 @@
+From 95d97b111e1e184b0c8656137033ed64f2cf21e4 Mon Sep 17 00:00:00 2001
+From: Vignesh Viswanathan <quic_viswanat@quicinc.com>
+Date: Mon, 4 Sep 2023 22:55:13 +0530
+Subject: arm64: dts: qcom: ipq6018: Fix hwlock index for SMEM
+
+From: Vignesh Viswanathan <quic_viswanat@quicinc.com>
+
+commit 95d97b111e1e184b0c8656137033ed64f2cf21e4 upstream.
+
+SMEM uses lock index 3 of the TCSR Mutex hwlock for allocations
+in SMEM region shared by the Host and FW.
+
+Fix the SMEM hwlock index to 3 for IPQ6018.
+
+Cc: stable@vger.kernel.org
+Fixes: 5bf635621245 ("arm64: dts: ipq6018: Add a few device nodes")
+Signed-off-by: Vignesh Viswanathan <quic_viswanat@quicinc.com>
+Acked-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20230904172516.479866-3-quic_viswanat@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/ipq6018.dtsi |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -175,7 +175,7 @@
+       smem {
+               compatible = "qcom,smem";
+               memory-region = <&smem_region>;
+-              hwlocks = <&tcsr_mutex 0>;
++              hwlocks = <&tcsr_mutex 3>;
+       };
+       soc: soc {
diff --git a/queue-5.15/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch b/queue-5.15/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
new file mode 100644 (file)
index 0000000..3c9d95d
--- /dev/null
@@ -0,0 +1,38 @@
+From 11aeb97b45ad2e0040cbb2a589bc403152526345 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 18 Sep 2023 14:15:33 -0400
+Subject: btrfs: don't arbitrarily slow down delalloc if we're committing
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 11aeb97b45ad2e0040cbb2a589bc403152526345 upstream.
+
+We have a random schedule_timeout() if the current transaction is
+committing, which seems to be a holdover from the original delalloc
+reservation code.
+
+Remove this, we have the proper flushing stuff, we shouldn't be hoping
+for random timing things to make everything work.  This just induces
+latency for no reason.
+
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/delalloc-space.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -312,9 +312,6 @@ int btrfs_delalloc_reserve_metadata(stru
+       } else {
+               if (current->journal_info)
+                       flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+-              if (btrfs_transaction_in_commit(fs_info))
+-                      schedule_timeout(1);
+       }
+       num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
diff --git a/queue-5.15/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch b/queue-5.15/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
new file mode 100644 (file)
index 0000000..61621ad
--- /dev/null
@@ -0,0 +1,52 @@
+From 3337a6fea25370d3d244ec6bb38c71ee86fcf837 Mon Sep 17 00:00:00 2001
+From: Kathiravan Thirumoorthy <quic_kathirav@quicinc.com>
+Date: Mon, 25 Sep 2023 13:59:22 +0530
+Subject: firmware: qcom_scm: use 64-bit calling convention only when client is 64-bit
+
+From: Kathiravan Thirumoorthy <quic_kathirav@quicinc.com>
+
+commit 3337a6fea25370d3d244ec6bb38c71ee86fcf837 upstream.
+
+Per the "SMC calling convention specification", the 64-bit calling
+convention can only be used when the client is 64-bit. Whereas the
+32-bit calling convention can be used by either a 32-bit or a 64-bit
+client.
+
+Currently during SCM probe, irrespective of the client, 64-bit calling
+convention is made, which is incorrect and may lead to the undefined
+behaviour when the client is 32-bit. Let's fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: 9a434cee773a ("firmware: qcom_scm: Dynamically support SMCCC and legacy conventions")
+Reviewed-By: Elliot Berman <quic_eberman@quicinc.com>
+Signed-off-by: Kathiravan Thirumoorthy <quic_kathirav@quicinc.com>
+Link: https://lore.kernel.org/r/20230925-scm-v3-1-8790dff6a749@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/qcom_scm.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -137,6 +137,12 @@ static enum qcom_scm_convention __get_co
+               return qcom_scm_convention;
+       /*
++       * Per the "SMC calling convention specification", the 64-bit calling
++       * convention can only be used when the client is 64-bit, otherwise
++       * system will encounter the undefined behaviour.
++       */
++#if IS_ENABLED(CONFIG_ARM64)
++      /*
+        * Device isn't required as there is only one argument - no device
+        * needed to dma_map_single to secure world
+        */
+@@ -156,6 +162,7 @@ static enum qcom_scm_convention __get_co
+               forced = true;
+               goto found;
+       }
++#endif
+       probed_convention = SMC_CONVENTION_ARM_32;
+       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
diff --git a/queue-5.15/ima-annotate-iint-mutex-to-avoid-lockdep-false-positive-warnings.patch b/queue-5.15/ima-annotate-iint-mutex-to-avoid-lockdep-false-positive-warnings.patch
new file mode 100644 (file)
index 0000000..69ea5bd
--- /dev/null
@@ -0,0 +1,112 @@
+From e044374a8a0a99e46f4e6d6751d3042b6d9cc12e Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Thu, 5 Oct 2023 14:15:58 +0300
+Subject: ima: annotate iint mutex to avoid lockdep false positive warnings
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit e044374a8a0a99e46f4e6d6751d3042b6d9cc12e upstream.
+
+It is not clear that IMA should be nested at all, but as long is it
+measures files both on overlayfs and on underlying fs, we need to
+annotate the iint mutex to avoid lockdep false positives related to
+IMA + overlayfs, same as overlayfs annotates the inode mutex.
+
+Reported-and-tested-by: syzbot+b42fe626038981fb7bfa@syzkaller.appspotmail.com
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/iint.c |   48 +++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 37 insertions(+), 11 deletions(-)
+
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_i
+       return iint;
+ }
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long is it measures
++ * files both on overlayfs and on underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++                                       struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++      static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++      int depth = inode->i_sb->s_stack_depth;
++
++      if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++              depth = 0;
++
++      lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++                           struct inode *inode)
+ {
+-      kfree(iint->ima_hash);
+       iint->ima_hash = NULL;
+       iint->version = 0;
+       iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_i
+       iint->ima_creds_status = INTEGRITY_UNKNOWN;
+       iint->evm_status = INTEGRITY_UNKNOWN;
+       iint->measured_pcrs = 0;
++      mutex_init(&iint->mutex);
++      iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++      kfree(iint->ima_hash);
++      mutex_destroy(&iint->mutex);
+       kmem_cache_free(iint_cache, iint);
+ }
+@@ -112,6 +143,8 @@ struct integrity_iint_cache *integrity_i
+       if (!iint)
+               return NULL;
++      iint_init_always(iint, inode);
++
+       write_lock(&integrity_iint_lock);
+       p = &integrity_iint_tree.rb_node;
+@@ -161,25 +194,18 @@ void integrity_inode_free(struct inode *
+       iint_free(iint);
+ }
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+       struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+       memset(iint, 0, sizeof(*iint));
+-      iint->ima_file_status = INTEGRITY_UNKNOWN;
+-      iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+-      iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+-      iint->ima_read_status = INTEGRITY_UNKNOWN;
+-      iint->ima_creds_status = INTEGRITY_UNKNOWN;
+-      iint->evm_status = INTEGRITY_UNKNOWN;
+-      mutex_init(&iint->mutex);
+ }
+ static int __init integrity_iintcache_init(void)
+ {
+       iint_cache =
+           kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+-                            0, SLAB_PANIC, init_once);
++                            0, SLAB_PANIC, iint_init_once);
+       return 0;
+ }
+ DEFINE_LSM(integrity) = {
diff --git a/queue-5.15/ima-detect-changes-to-the-backing-overlay-file.patch b/queue-5.15/ima-detect-changes-to-the-backing-overlay-file.patch
new file mode 100644 (file)
index 0000000..0ccf56e
--- /dev/null
@@ -0,0 +1,113 @@
+From b836c4d29f2744200b2af41e14bf50758dddc818 Mon Sep 17 00:00:00 2001
+From: Mimi Zohar <zohar@linux.ibm.com>
+Date: Wed, 18 Oct 2023 14:47:02 -0400
+Subject: ima: detect changes to the backing overlay file
+
+From: Mimi Zohar <zohar@linux.ibm.com>
+
+commit b836c4d29f2744200b2af41e14bf50758dddc818 upstream.
+
+Commit 18b44bc5a672 ("ovl: Always reevaluate the file signature for
+IMA") forced signature re-evaulation on every file access.
+
+Instead of always re-evaluating the file's integrity, detect a change
+to the backing file, by comparing the cached file metadata with the
+backing file's metadata.  Verifying just the i_version has not changed
+is insufficient.  In addition save and compare the i_ino and s_dev
+as well.
+
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Tested-by: Eric Snowberg <eric.snowberg@oracle.com>
+Tested-by: Raul E Rangel <rrangel@chromium.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/super.c              |    2 +-
+ security/integrity/ima/ima_api.c  |    5 +++++
+ security/integrity/ima/ima_main.c |   16 +++++++++++++++-
+ security/integrity/integrity.h    |    2 ++
+ 4 files changed, 23 insertions(+), 2 deletions(-)
+
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -2140,7 +2140,7 @@ static int ovl_fill_super(struct super_b
+               ovl_trusted_xattr_handlers;
+       sb->s_fs_info = ofs;
+       sb->s_flags |= SB_POSIXACL;
+-      sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++      sb->s_iflags |= SB_I_SKIP_SYNC;
+       err = -ENOMEM;
+       root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -216,6 +216,7 @@ int ima_collect_measurement(struct integ
+ {
+       const char *audit_cause = "failed";
+       struct inode *inode = file_inode(file);
++      struct inode *real_inode = d_real_inode(file_dentry(file));
+       const char *filename = file->f_path.dentry->d_name.name;
+       int result = 0;
+       int length;
+@@ -266,6 +267,10 @@ int ima_collect_measurement(struct integ
+       iint->ima_hash = tmpbuf;
+       memcpy(iint->ima_hash, &hash, length);
+       iint->version = i_version;
++      if (real_inode != inode) {
++              iint->real_ino = real_inode->i_ino;
++              iint->real_dev = real_inode->i_sb->s_dev;
++      }
+       /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+       if (!result)
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -26,6 +26,7 @@
+ #include <linux/ima.h>
+ #include <linux/iversion.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+ #include "ima.h"
+@@ -202,7 +203,7 @@ static int process_measurement(struct fi
+                              u32 secid, char *buf, loff_t size, int mask,
+                              enum ima_hooks func)
+ {
+-      struct inode *inode = file_inode(file);
++      struct inode *backing_inode, *inode = file_inode(file);
+       struct integrity_iint_cache *iint = NULL;
+       struct ima_template_desc *template_desc = NULL;
+       char *pathbuf = NULL;
+@@ -278,6 +279,19 @@ static int process_measurement(struct fi
+               iint->measured_pcrs = 0;
+       }
++      /* Detect and re-evaluate changes made to the backing file. */
++      backing_inode = d_real_inode(file_dentry(file));
++      if (backing_inode != inode &&
++          (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++              if (!IS_I_VERSION(backing_inode) ||
++                  backing_inode->i_sb->s_dev != iint->real_dev ||
++                  backing_inode->i_ino != iint->real_ino ||
++                  !inode_eq_iversion(backing_inode, iint->version)) {
++                      iint->flags &= ~IMA_DONE_MASK;
++                      iint->measured_pcrs = 0;
++              }
++      }
++
+       /* Determine if already appraised/measured based on bitmask
+        * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+        *  IMA_AUDIT, IMA_AUDITED)
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -131,6 +131,8 @@ struct integrity_iint_cache {
+       unsigned long flags;
+       unsigned long measured_pcrs;
+       unsigned long atomic_flags;
++      unsigned long real_ino;
++      dev_t real_dev;
+       enum integrity_status ima_file_status:4;
+       enum integrity_status ima_mmap_status:4;
+       enum integrity_status ima_bprm_status:4;
diff --git a/queue-5.15/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch b/queue-5.15/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
new file mode 100644 (file)
index 0000000..39cacfb
--- /dev/null
@@ -0,0 +1,67 @@
+From d08970df1980476f27936e24d452550f3e9e92e1 Mon Sep 17 00:00:00 2001
+From: Brian Geffon <bgeffon@google.com>
+Date: Fri, 22 Sep 2023 12:07:04 -0400
+Subject: PM: hibernate: Clean up sync_read handling in snapshot_write_next()
+
+From: Brian Geffon <bgeffon@google.com>
+
+commit d08970df1980476f27936e24d452550f3e9e92e1 upstream.
+
+In snapshot_write_next(), sync_read is set and unset in three different
+spots unnecessiarly. As a result there is a subtle bug where the first
+page after the meta data has been loaded unconditionally sets sync_read
+to 0. If this first PFN was actually a highmem page, then the returned
+buffer will be the global "buffer," and the page needs to be loaded
+synchronously.
+
+That is, I'm not sure we can always assume the following to be safe:
+
+       handle->buffer = get_buffer(&orig_bm, &ca);
+       handle->sync_read = 0;
+
+Because get_buffer() can call get_highmem_page_buffer() which can
+return 'buffer'.
+
+The easiest way to address this is just set sync_read before
+snapshot_write_next() returns if handle->buffer == buffer.
+
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable <stable@vger.kernel.org>
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/snapshot.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2629,8 +2629,6 @@ int snapshot_write_next(struct snapshot_
+       if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+               return 0;
+-      handle->sync_read = 1;
+-
+       if (!handle->cur) {
+               if (!buffer)
+                       /* This makes the buffer be freed by swsusp_free() */
+@@ -2666,7 +2664,6 @@ int snapshot_write_next(struct snapshot_
+                       memory_bm_position_reset(&orig_bm);
+                       restore_pblist = NULL;
+                       handle->buffer = get_buffer(&orig_bm, &ca);
+-                      handle->sync_read = 0;
+                       if (IS_ERR(handle->buffer))
+                               return PTR_ERR(handle->buffer);
+               }
+@@ -2676,9 +2673,8 @@ int snapshot_write_next(struct snapshot_
+               handle->buffer = get_buffer(&orig_bm, &ca);
+               if (IS_ERR(handle->buffer))
+                       return PTR_ERR(handle->buffer);
+-              if (handle->buffer != buffer)
+-                      handle->sync_read = 0;
+       }
++      handle->sync_read = (handle->buffer == buffer);
+       handle->cur++;
+       return PAGE_SIZE;
+ }
diff --git a/queue-5.15/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch b/queue-5.15/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
new file mode 100644 (file)
index 0000000..e2288e5
--- /dev/null
@@ -0,0 +1,47 @@
+From f0c7183008b41e92fa676406d87f18773724b48b Mon Sep 17 00:00:00 2001
+From: Brian Geffon <bgeffon@google.com>
+Date: Thu, 21 Sep 2023 13:00:45 -0400
+Subject: PM: hibernate: Use __get_safe_page() rather than touching the list
+
+From: Brian Geffon <bgeffon@google.com>
+
+commit f0c7183008b41e92fa676406d87f18773724b48b upstream.
+
+We found at least one situation where the safe pages list was empty and
+get_buffer() would gladly try to use a NULL pointer.
+
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/snapshot.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2414,8 +2414,9 @@ static void *get_highmem_page_buffer(str
+               pbe->copy_page = tmp;
+       } else {
+               /* Copy of the page will be stored in normal memory */
+-              kaddr = safe_pages_list;
+-              safe_pages_list = safe_pages_list->next;
++              kaddr = __get_safe_page(ca->gfp_mask);
++              if (!kaddr)
++                      return ERR_PTR(-ENOMEM);
+               pbe->copy_page = virt_to_page(kaddr);
+       }
+       pbe->next = highmem_pblist;
+@@ -2595,8 +2596,9 @@ static void *get_buffer(struct memory_bi
+               return ERR_PTR(-ENOMEM);
+       }
+       pbe->orig_address = page_address(page);
+-      pbe->address = safe_pages_list;
+-      safe_pages_list = safe_pages_list->next;
++      pbe->address = __get_safe_page(ca->gfp_mask);
++      if (!pbe->address)
++              return ERR_PTR(-ENOMEM);
+       pbe->next = restore_pblist;
+       restore_pblist = pbe;
+       return pbe->address;
diff --git a/queue-5.15/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch b/queue-5.15/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
new file mode 100644 (file)
index 0000000..dce1f2d
--- /dev/null
@@ -0,0 +1,58 @@
+From 5f98fd034ca6fd1ab8c91a3488968a0e9caaabf6 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Sat, 30 Sep 2023 17:46:56 +0000
+Subject: rcu: kmemleak: Ignore kmemleak false positives when RCU-freeing objects
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 5f98fd034ca6fd1ab8c91a3488968a0e9caaabf6 upstream.
+
+Since the actual slab freeing is deferred when calling kvfree_rcu(), so
+is the kmemleak_free() callback informing kmemleak of the object
+deletion. From the perspective of the kvfree_rcu() caller, the object is
+freed and it may remove any references to it. Since kmemleak does not
+scan RCU internal data storing the pointer, it will report such objects
+as leaks during the grace period.
+
+Tell kmemleak to ignore such objects on the kvfree_call_rcu() path. Note
+that the tiny RCU implementation does not have such issue since the
+objects can be tracked from the rcu_ctrlblk structure.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://lore.kernel.org/all/F903A825-F05F-4B77-A2B5-7356282FBA2C@apple.com/
+Cc: <stable@vger.kernel.org>
+Tested-by: Christoph Paasch <cpaasch@apple.com>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -3609,6 +3610,14 @@ void kvfree_call_rcu(struct rcu_head *he
+       WRITE_ONCE(krcp->count, krcp->count + 1);
++      /*
++       * The kvfree_rcu() caller considers the pointer freed at this point
++       * and likely removes any references to it. Since the actual slab
++       * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++       * this object (no scanning or false positives reporting).
++       */
++      kmemleak_ignore(ptr);
++
+       // Set timer to drain after KFREE_DRAIN_JIFFIES.
+       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+           !krcp->monitor_todo) {
diff --git a/queue-5.15/rcu-tree-defer-setting-of-jiffies-during-stall-reset.patch b/queue-5.15/rcu-tree-defer-setting-of-jiffies-during-stall-reset.patch
new file mode 100644 (file)
index 0000000..be2e029
--- /dev/null
@@ -0,0 +1,131 @@
+From b96e7a5fa0ba9cda32888e04f8f4bac42d49a7f8 Mon Sep 17 00:00:00 2001
+From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Date: Tue, 5 Sep 2023 00:02:11 +0000
+Subject: rcu/tree: Defer setting of jiffies during stall reset
+
+From: Joel Fernandes (Google) <joel@joelfernandes.org>
+
+commit b96e7a5fa0ba9cda32888e04f8f4bac42d49a7f8 upstream.
+
+There are instances where rcu_cpu_stall_reset() is called when jiffies
+did not get a chance to update for a long time. Before jiffies is
+updated, the CPU stall detector can go off triggering false-positives
+where a just-started grace period appears to be ages old. In the past,
+we disabled stall detection in rcu_cpu_stall_reset() however this got
+changed [1]. This is resulting in false-positives in KGDB usecase [2].
+
+Fix this by deferring the update of jiffies to the third run of the FQS
+loop. This is more robust, as, even if rcu_cpu_stall_reset() is called
+just before jiffies is read, we would end up pushing out the jiffies
+read by 3 more FQS loops. Meanwhile the CPU stall detection will be
+delayed and we will not get any false positives.
+
+[1] https://lore.kernel.org/all/20210521155624.174524-2-senozhatsky@chromium.org/
+[2] https://lore.kernel.org/all/20230814020045.51950-2-chenhuacai@loongson.cn/
+
+Tested with rcutorture.cpu_stall option as well to verify stall behavior
+with/without patch.
+
+Tested-by: Huacai Chen <chenhuacai@loongson.cn>
+Reported-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Closes: https://lore.kernel.org/all/20230814020045.51950-2-chenhuacai@loongson.cn/
+Suggested-by: Paul  McKenney <paulmck@kernel.org>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Fixes: a80be428fbc1 ("rcu: Do not disable GP stall detection in rcu_cpu_stall_reset()")
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c       |   12 ++++++++++++
+ kernel/rcu/tree.h       |    4 ++++
+ kernel/rcu/tree_stall.h |   20 ++++++++++++++++++--
+ 3 files changed, 34 insertions(+), 2 deletions(-)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1906,10 +1906,22 @@ static bool rcu_gp_fqs_check_wake(int *g
+  */
+ static void rcu_gp_fqs(bool first_time)
+ {
++      int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+       struct rcu_node *rnp = rcu_get_root();
+       WRITE_ONCE(rcu_state.gp_activity, jiffies);
+       WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++      WARN_ON_ONCE(nr_fqs > 3);
++      /* Only countdown nr_fqs for stall purposes if jiffies moves. */
++      if (nr_fqs) {
++              if (nr_fqs == 1) {
++                      WRITE_ONCE(rcu_state.jiffies_stall,
++                                 jiffies + rcu_jiffies_till_stall_check());
++              }
++              WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++      }
++
+       if (first_time) {
+               /* Collect dyntick-idle snapshots. */
+               force_qs_rnp(dyntick_save_progress_counter);
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -351,6 +351,10 @@ struct rcu_state {
+                                               /*  in jiffies. */
+       unsigned long jiffies_stall;            /* Time at which to check */
+                                               /*  for CPU stalls. */
++      int nr_fqs_jiffies_stall;               /* Number of fqs loops after
++                                               * which read jiffies and set
++                                               * jiffies_stall. Stall
++                                               * warnings disabled if !0. */
+       unsigned long jiffies_resched;          /* Time at which to resched */
+                                               /*  a reluctant CPU. */
+       unsigned long n_force_qs_gpstart;       /* Snapshot of n_force_qs at */
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -121,12 +121,17 @@ static void panic_on_rcu_stall(void)
+ /**
+  * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+  *
++ * To perform the reset request from the caller, disable stall detection until
++ * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
++ * loaded.  It should be safe to do from the fqs loop as enough timer
++ * interrupts and context switches should have passed.
++ *
+  * The caller must disable hard irqs.
+  */
+ void rcu_cpu_stall_reset(void)
+ {
+-      WRITE_ONCE(rcu_state.jiffies_stall,
+-                 jiffies + rcu_jiffies_till_stall_check());
++      WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++      WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+ //////////////////////////////////////////////////////////////////////////////
+@@ -142,6 +147,7 @@ static void record_gp_stall_check_time(v
+       WRITE_ONCE(rcu_state.gp_start, j);
+       j1 = rcu_jiffies_till_stall_check();
+       smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++      WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+       WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+       rcu_state.jiffies_resched = j + j1 / 2;
+       rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -662,6 +668,16 @@ static void check_cpu_stall(struct rcu_d
+           !rcu_gp_in_progress())
+               return;
+       rcu_stall_kick_kthreads();
++
++      /*
++       * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++       * loop has to set jiffies to ensure a non-stale jiffies value. This
++       * is required to have good jiffies value after coming out of long
++       * breaks of jiffies updates. Not doing so can cause false positives.
++       */
++      if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++              return;
++
+       j = jiffies;
+       /*
diff --git a/queue-5.15/series b/queue-5.15/series
index 6e46217d1b51c3dd3cbffd87c8a46372972be825..2c41b55173c95e7d04fe95a08d289deb2c9e41de 100644 (file)
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -189,6 +189,18 @@ mmc-vub300-fix-an-error-code.patch
 mmc-sdhci_am654-fix-start-loop-index-for-tap-value-parsing.patch
 pci-aspm-fix-l1-substate-handling-in-aspm_attr_store_common.patch
 pci-exynos-don-t-discard-.remove-callback.patch
+wifi-wilc1000-use-vmm_table-as-array-in-wilc-struct.patch
+svcrdma-drop-connection-after-an-rdma-read-error.patch
+rcu-tree-defer-setting-of-jiffies-during-stall-reset.patch
+arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
+pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
+pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
+rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
+btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
+firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
+acpi-fpdt-properly-handle-invalid-fpdt-subtables.patch
+ima-annotate-iint-mutex-to-avoid-lockdep-false-positive-warnings.patch
+ima-detect-changes-to-the-backing-overlay-file.patch
 wifi-ath11k-fix-temperature-event-locking.patch
 wifi-ath11k-fix-dfs-radar-event-locking.patch
 wifi-ath11k-fix-htt-pktlog-locking.patch
diff --git a/queue-5.15/svcrdma-drop-connection-after-an-rdma-read-error.patch b/queue-5.15/svcrdma-drop-connection-after-an-rdma-read-error.patch
new file mode 100644 (file)
index 0000000..b52760f
--- /dev/null
@@ -0,0 +1,33 @@
+From 197115ebf358cb440c73e868b2a0a5ef728decc6 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Tue, 10 Oct 2023 13:23:41 -0400
+Subject: svcrdma: Drop connection after an RDMA Read error
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 197115ebf358cb440c73e868b2a0a5ef728decc6 upstream.
+
+When an RPC Call message cannot be pulled from the client, that
+is a message loss, by definition. Close the connection to trigger
+the client to resend.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Tom Talpey <tom@talpey.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -852,7 +852,8 @@ out_readfail:
+       if (ret == -EINVAL)
+               svc_rdma_send_error(rdma_xprt, ctxt, ret);
+       svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+-      return ret;
++      svc_xprt_deferred_close(xprt);
++      return -ENOTCONN;
+ out_backchannel:
+       svc_rdma_handle_bc_reply(rqstp, ctxt);
diff --git a/queue-5.15/wifi-wilc1000-use-vmm_table-as-array-in-wilc-struct.patch b/queue-5.15/wifi-wilc1000-use-vmm_table-as-array-in-wilc-struct.patch
new file mode 100644 (file)
index 0000000..1acadc0
--- /dev/null
@@ -0,0 +1,47 @@
+From 05ac1a198a63ad66bf5ae8b7321407c102d40ef3 Mon Sep 17 00:00:00 2001
+From: Ajay Singh <ajay.kathat@microchip.com>
+Date: Tue, 17 Oct 2023 10:43:38 +0200
+Subject: wifi: wilc1000: use vmm_table as array in wilc struct
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ajay Singh <ajay.kathat@microchip.com>
+
+commit 05ac1a198a63ad66bf5ae8b7321407c102d40ef3 upstream.
+
+Enabling KASAN and running some iperf tests raises some memory issues with
+vmm_table:
+
+BUG: KASAN: slab-out-of-bounds in wilc_wlan_handle_txq+0x6ac/0xdb4
+Write of size 4 at addr c3a61540 by task wlan0-tx/95
+
+KASAN detects that we are writing data beyond range allocated to vmm_table.
+There is indeed a mismatch between the size passed to allocator in
+wilc_wlan_init, and the range of possible indexes used later: allocation
+size is missing a multiplication by sizeof(u32)
+
+Fixes: 40b717bfcefa ("wifi: wilc1000: fix DMA on stack objects")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ajay Singh <ajay.kathat@microchip.com>
+Signed-off-by: Alexis Lothoré <alexis.lothore@bootlin.com>
+Reviewed-by: Michael Walle <mwalle@kernel.org>
+Reviewed-by: Jeff Johnson <quic_jjohnson@quicinc.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20231017-wilc1000_tx_oops-v3-1-b2155f1f7bee@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/microchip/wilc1000/wlan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1458,7 +1458,7 @@ int wilc_wlan_init(struct net_device *de
+       }
+       if (!wilc->vmm_table)
+-              wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++              wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+       if (!wilc->vmm_table) {
+               ret = -ENOBUFS;