--- /dev/null
+From 3770e52fd4ec40ebee16ba19ad6c09dc0b52739b Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 30 Jan 2023 14:07:26 +0100
+Subject: mm: extend max struct page size for kmsan
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 3770e52fd4ec40ebee16ba19ad6c09dc0b52739b upstream.
+
+After x86 enabled support for KMSAN, it has become possible to have a larger
+'struct page' than was expected when commit 5470dea49f53 ("mm: use
+mm_zero_struct_page from SPARC on all 64b architectures") was merged:
+
+include/linux/mm.h:156:10: warning: no case matching constant switch condition '96'
+ switch (sizeof(struct page)) {
+
+Extend the maximum accordingly.
+
+Link: https://lkml.kernel.org/r/20230130130739.563628-1-arnd@kernel.org
+Fixes: 5470dea49f53 ("mm: use mm_zero_struct_page from SPARC on all 64b architectures")
+Fixes: 4ca8cc8d1bbe ("x86: kmsan: enable KMSAN builds for x86")
+Fixes: f80be4571b19 ("kmsan: add KMSAN runtime core")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Alex Sierra <alex.sierra@amd.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -136,7 +136,7 @@ extern int mmap_rnd_compat_bits __read_m
+ * define their own version of this macro in <asm/pgtable.h>
+ */
+ #if BITS_PER_LONG == 64
+-/* This function must be updated when the size of struct page grows above 80
++/* This function must be updated when the size of struct page grows above 96
+ * or reduces below 56. The idea that compiler optimizes out switch()
+ * statement, and only leaves move/store instructions. Also the compiler can
+ * combine write statements if they are both assignments and can be reordered,
+@@ -147,12 +147,18 @@ static inline void __mm_zero_struct_page
+ {
+ unsigned long *_pp = (void *)page;
+
+- /* Check that struct page is either 56, 64, 72, or 80 bytes */
++ /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
+ BUILD_BUG_ON(sizeof(struct page) & 7);
+ BUILD_BUG_ON(sizeof(struct page) < 56);
+- BUILD_BUG_ON(sizeof(struct page) > 80);
++ BUILD_BUG_ON(sizeof(struct page) > 96);
+
+ switch (sizeof(struct page)) {
++ case 96:
++ _pp[11] = 0;
++ fallthrough;
++ case 88:
++ _pp[10] = 0;
++ fallthrough;
+ case 80:
+ _pp[9] = 0;
+ fallthrough;
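
For context on where the extra bytes come from: commit f80be4571b19 ("kmsan:
add KMSAN runtime core") added two per-page metadata pointers to struct page,
i.e. 16 bytes on 64-bit, taking an 80-byte struct page to 96. Abridged from
include/linux/mm_types.h (field comments paraphrased):

	#ifdef CONFIG_KMSAN
		/*
		 * KMSAN metadata for the page: the shadow page tracks
		 * which bits of the data are initialized, the origin
		 * page records stack trace ids for uninitialized values.
		 */
		struct page *kmsan_shadow;
		struct page *kmsan_origin;
	#endif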
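
To see why the switch() collapses to straight-line stores, here is a minimal
user-space sketch of the same fallthrough pattern (a hypothetical demo, not
kernel code: demo_page and its 96-byte size are assumptions for illustration,
and the kernel's fallthrough macro is replaced by plain fall-through
comments). Because sizeof() is a compile-time constant, the compiler discards
every non-matching case and keeps only the stores for the real size:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	struct demo_page {
		uint64_t words[12];	/* 96 bytes: the new maximum */
	};

	static void demo_zero_struct_page(struct demo_page *page)
	{
		uint64_t *_pp = (void *)page;

		switch (sizeof(struct demo_page)) {
		case 96:
			_pp[11] = 0;
			/* fall through */
		case 88:
			_pp[10] = 0;
			/* fall through */
		case 80:
			_pp[9] = 0;
			/* fall through */
		case 72:
			_pp[8] = 0;
			/* fall through */
		case 64:
			_pp[7] = 0;
			/* fall through */
		case 56:
			/* the first 56 bytes are always present */
			_pp[6] = 0;
			_pp[5] = 0;
			_pp[4] = 0;
			_pp[3] = 0;
			_pp[2] = 0;
			_pp[1] = 0;
			_pp[0] = 0;
		}
	}

	int main(void)
	{
		struct demo_page page;
		size_t i;

		memset(&page, 0xff, sizeof(page));
		demo_zero_struct_page(&page);
		for (i = 0; i < sizeof(page) / sizeof(page.words[0]); i++)
			assert(page.words[i] == 0);
		return 0;
	}

With optimization enabled, gcc and clang should reduce demo_zero_struct_page()
to a branch-free run of stores (possibly merged into wider ones), which is the
point of the pattern over a plain memset() call.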