--- /dev/null
+From 4639c5021029d49fd2f97fa8d74731f167f98919 Mon Sep 17 00:00:00 2001
+From: bo liu <bo.liu@senarytech.com>
+Date: Mon, 5 Feb 2024 09:38:02 +0800
+Subject: ALSA: hda/conexant: Add quirk for SWS JS201D
+
+From: bo liu <bo.liu@senarytech.com>
+
+commit 4639c5021029d49fd2f97fa8d74731f167f98919 upstream.
+
+The SWS JS201D needs a pinconfig different from the one used by the
+Windows driver. Add a quirk to apply a specific pinconfig for the SWS JS201D.
+
+Signed-off-by: bo liu <bo.liu@senarytech.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20240205013802.51907-1-bo.liu@senarytech.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_conexant.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -216,6 +216,7 @@ enum {
+ CXT_FIXUP_MUTE_LED_GPIO,
+ CXT_FIXUP_HEADSET_MIC,
+ CXT_FIXUP_HP_MIC_NO_PRESENCE,
++ CXT_PINCFG_SWS_JS201D,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -704,6 +705,17 @@ static const struct hda_pintbl cxt_pincf
+ {}
+ };
+
++/* SuoWoSi/South-holding JS201D with sn6140 */
++static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
++ { 0x16, 0x03211040 }, /* hp out */
++ { 0x17, 0x91170110 }, /* SPK/Class_D */
++ { 0x18, 0x95a70130 }, /* Internal mic */
++ { 0x19, 0x03a11020 }, /* Headset Mic */
++ { 0x1a, 0x40f001f0 }, /* Not used */
++ { 0x21, 0x40f001f0 }, /* Not used */
++ {}
++};
++
+ static const struct hda_fixup cxt_fixups[] = {
+ [CXT_PINCFG_LENOVO_X200] = {
+ .type = HDA_FIXUP_PINS,
+@@ -855,6 +867,10 @@ static const struct hda_fixup cxt_fixups
+ .chained = true,
+ .chain_id = CXT_FIXUP_HEADSET_MIC,
+ },
++ [CXT_PINCFG_SWS_JS201D] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = cxt_pincfg_sws_js201d,
++ },
+ };
+
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -926,6 +942,7 @@ static const struct snd_pci_quirk cxt506
+ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
++ SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+@@ -965,6 +982,7 @@ static const struct hda_model_fixup cxt5
+ { .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
+ { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
++ { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
+ {}
+ };
+
--- /dev/null
+From cc9432c4fb159a3913e0ce3173b8218cd5bad2e0 Mon Sep 17 00:00:00 2001
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+Date: Tue, 6 Feb 2024 09:39:12 +0100
+Subject: mmc: slot-gpio: Allow non-sleeping GPIO ro
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+commit cc9432c4fb159a3913e0ce3173b8218cd5bad2e0 upstream.
+
+This change uses the appropriate _cansleep or non-sleeping API for
+reading the GPIO read-only state. This allows callers whose GPIOs never
+sleep to read the state from atomic context.
+
+Implement the same mechanism as in commit 52af318c93e97 ("mmc: Allow
+non-sleeping GPIO cd").
+
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240206083912.2543142-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/slot-gpio.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -63,11 +63,15 @@ int mmc_gpio_alloc(struct mmc_host *host
+ int mmc_gpio_get_ro(struct mmc_host *host)
+ {
+ struct mmc_gpio *ctx = host->slot.handler_priv;
++ int cansleep;
+
+ if (!ctx || !ctx->ro_gpio)
+ return -ENOSYS;
+
+- return gpiod_get_value_cansleep(ctx->ro_gpio);
++ cansleep = gpiod_cansleep(ctx->ro_gpio);
++ return cansleep ?
++ gpiod_get_value_cansleep(ctx->ro_gpio) :
++ gpiod_get_value(ctx->ro_gpio);
+ }
+ EXPORT_SYMBOL(mmc_gpio_get_ro);
+
--- /dev/null
+From 67b8bcbaed4777871bb0dcc888fb02a614a98ab1 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Wed, 24 Jan 2024 21:19:36 +0900
+Subject: nilfs2: fix data corruption in dsync block recovery for small block sizes
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 67b8bcbaed4777871bb0dcc888fb02a614a98ab1 upstream.
+
+The helper function nilfs_recovery_copy_block() of
+nilfs_recovery_dsync_blocks(), which recovers data from logs created by
+data sync writes during a mount after an unclean shutdown, incorrectly
+calculates the on-page offset when copying repair data to the file's page
+cache. In environments where the block size is smaller than the page
+size, this flaw can cause data corruption and leak uninitialized memory
+bytes during the recovery process.
+
+Fix these issues by correcting this byte offset calculation on the page.
+
+Link: https://lkml.kernel.org/r/20240124121936.10575-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/recovery.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/nilfs2/recovery.c
++++ b/fs/nilfs2/recovery.c
+@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_rec
+
+ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
+ struct nilfs_recovery_block *rb,
+- struct page *page)
++ loff_t pos, struct page *page)
+ {
+ struct buffer_head *bh_org;
++ size_t from = pos & ~PAGE_MASK;
+ void *kaddr;
+
+ bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
+@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(str
+ return -EIO;
+
+ kaddr = kmap_atomic(page);
+- memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
++ memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
+ kunmap_atomic(kaddr);
+ brelse(bh_org);
+ return 0;
+@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(st
+ goto failed_inode;
+ }
+
+- err = nilfs_recovery_copy_block(nilfs, rb, page);
++ err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
+ if (unlikely(err))
+ goto failed_page;
+
--- /dev/null
+From 38296afe3c6ee07319e01bb249aa4bb47c07b534 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Wed, 31 Jan 2024 23:56:57 +0900
+Subject: nilfs2: fix hang in nilfs_lookup_dirty_data_buffers()
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 38296afe3c6ee07319e01bb249aa4bb47c07b534 upstream.
+
+Syzbot reported a hang issue in migrate_pages_batch() called by mbind()
+and nilfs_lookup_dirty_data_buffers() called in the log writer of nilfs2.
+
+While migrate_pages_batch() locks a folio and waits for its writeback to
+complete, the log writer thread that should bring that writeback to
+completion picks up the same folio in nilfs_lookup_dirty_data_buffers(),
+which it calls for subsequent log creation, and tries to lock the folio
+itself, causing a deadlock.
+
+In the first place, it is unexpected that folios/pages in the middle of
+writeback will be updated and become dirty. Nilfs2 adds a checksum to
+verify the validity of the log being written and uses it for recovery at
+mount, so data changes during writeback are suppressed. Since this is
+broken, an unclean shutdown could potentially cause recovery to fail.
+
+Investigation revealed that the root cause is that the wait for writeback
+completion in nilfs_page_mkwrite() is conditional, and if the backing
+device does not require stable writes, data may be modified without
+waiting.
+
+Fix these issues by making nilfs_page_mkwrite() wait for writeback to
+finish regardless of the stable write requirement of the backing device.
+
+Link: https://lkml.kernel.org/r/20240131145657.4209-1-konishi.ryusuke@gmail.com
+Fixes: 1d1d1a767206 ("mm: only enforce stable page writes if the backing device requires it")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+ee2ae68da3b22d04cd8d@syzkaller.appspotmail.com
+Closes: https://lkml.kernel.org/r/00000000000047d819061004ad6c@google.com
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/file.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/file.c
++++ b/fs/nilfs2/file.c
+@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(str
+ nilfs_transaction_commit(inode->i_sb);
+
+ mapped:
+- wait_for_stable_page(page);
++ /*
++ * Since checksumming including data blocks is performed to determine
++ * the validity of the log to be written and used for recovery, it is
++ * necessary to wait for writeback to finish here, regardless of the
++ * stable write requirement of the backing device.
++ */
++ wait_on_page_writeback(page);
+ out:
+ sb_end_pagefault(inode->i_sb);
+ return block_page_mkwrite_return(ret);
--- /dev/null
+From 66bbea9ed6446b8471d365a22734dc00556c4785 Mon Sep 17 00:00:00 2001
+From: Vincent Donnefort <vdonnefort@google.com>
+Date: Wed, 31 Jan 2024 14:09:55 +0000
+Subject: ring-buffer: Clean ring_buffer_poll_wait() error return
+
+From: Vincent Donnefort <vdonnefort@google.com>
+
+commit 66bbea9ed6446b8471d365a22734dc00556c4785 upstream.
+
+The return type of ring_buffer_poll_wait() is __poll_t, which is, behind
+the scenes, an unsigned value in which event bits can be set. For a
+non-allocated CPU, we instead return -EINVAL (0xffffffea). Luckily, this
+ends up setting a few error bits (EPOLLERR | EPOLLHUP | EPOLLNVAL), so
+user-space is at least aware that something went wrong.
+
+Nonetheless, the code is incorrect. Replace that -EINVAL with a proper
+EPOLLERR to clean up the output. As this doesn't change the behaviour,
+there's no need to treat this change as a bug fix.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240131140955.3322792-1-vdonnefort@google.com
+
+Cc: stable@vger.kernel.org
+Fixes: 6721cb6002262 ("ring-buffer: Do not poll non allocated cpu buffers")
+Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -738,7 +738,7 @@ __poll_t ring_buffer_poll_wait(struct ri
+ full = 0;
+ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+- return -EINVAL;
++ return EPOLLERR;
+
+ cpu_buffer = buffer->buffers[cpu];
+ work = &cpu_buffer->irq_work;
--- /dev/null
+From 93cd256ab224c2519e7c4e5f58bb4f1ac2bf0965 Mon Sep 17 00:00:00 2001
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Date: Tue, 16 Jan 2024 16:29:59 -0500
+Subject: serial: max310x: improve crystal stable clock detection
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+commit 93cd256ab224c2519e7c4e5f58bb4f1ac2bf0965 upstream.
+
+Some people are seeing a warning similar to this when using a crystal:
+
+ max310x 11-006c: clock is not stable yet
+
+The datasheet doesn't mention the maximum time to wait for the clock to be
+stable when using a crystal, and it seems that the 10ms delay in the driver
+is not always sufficient.
+
+Jan Kundrát reported that it took three tries (each separated by 10ms) to
+get a stable clock.
+
+Modify the behavior to check the clock stable ready bit multiple times (20),
+waiting 10ms between each attempt.
+
+Note: the first draft of the driver originally used a 50ms delay, without
+checking the clock stable bit.
+Then a loop with 1000 retries was implemented, each time reading the clock
+stable bit.
+
+Fixes: 4cf9a888fd3c ("serial: max310x: Check the clock readiness")
+Cc: stable@vger.kernel.org
+Suggested-by: Jan Kundrát <jan.kundrat@cesnet.cz>
+Link: https://www.spinics.net/lists/linux-serial/msg35773.html
+Link: https://lore.kernel.org/all/20240110174015.6f20195fde08e5c9e64e5675@hugovil.com/raw
+Link: https://github.com/boundarydevices/linux/commit/e5dfe3e4a751392515d78051973190301a37ca9a
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20240116213001.3691629-3-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/max310x.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -235,6 +235,10 @@
+ #define MAX310x_REV_MASK (0xf8)
+ #define MAX310X_WRITE_BIT 0x80
+
++/* Crystal-related definitions */
++#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
++#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
++
+ /* MAX3107 specific */
+ #define MAX3107_REV_ID (0xa0)
+
+@@ -610,12 +614,19 @@ static int max310x_set_ref_clk(struct de
+
+ /* Wait for crystal */
+ if (xtal) {
+- unsigned int val = 0;
+- msleep(10);
+- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+- if (!(val & MAX310X_STS_CLKREADY_BIT)) {
++ bool stable = false;
++ unsigned int try = 0, val = 0;
++
++ do {
++ msleep(MAX310X_XTAL_WAIT_DELAY_MS);
++ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
++
++ if (val & MAX310X_STS_CLKREADY_BIT)
++ stable = true;
++ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
++
++ if (!stable)
+ dev_warn(dev, "clock is not stable yet\n");
+- }
+ }
+
+ return (int)bestfreq;
--- /dev/null
+From 0419373333c2f2024966d36261fd82a453281e80 Mon Sep 17 00:00:00 2001
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Date: Tue, 16 Jan 2024 16:29:58 -0500
+Subject: serial: max310x: set default value when reading clock ready bit
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+commit 0419373333c2f2024966d36261fd82a453281e80 upstream.
+
+If regmap_read() returns a non-zero value, the 'val' variable can be left
+uninitialized.
+
+Clear it before calling regmap_read() to make sure we properly detect
+the clock ready bit.
+
+Fixes: 4cf9a888fd3c ("serial: max310x: Check the clock readiness")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20240116213001.3691629-2-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/max310x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -610,7 +610,7 @@ static int max310x_set_ref_clk(struct de
+
+ /* Wait for crystal */
+ if (xtal) {
+- unsigned int val;
++ unsigned int val = 0;
+ msleep(10);
+ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+ if (!(val & MAX310X_STS_CLKREADY_BIT)) {
tracing-fix-wasted-memory-in-saved_cmdlines-logic.patch
staging-iio-ad5933-fix-type-mismatch-regression.patch
iio-magnetometer-rm3100-add-boundary-check-for-the-value-read-from-rm3100_reg_tmrc.patch
+ring-buffer-clean-ring_buffer_poll_wait-error-return.patch
+serial-max310x-set-default-value-when-reading-clock-ready-bit.patch
+serial-max310x-improve-crystal-stable-clock-detection.patch
+x86-kconfig-transmeta-crusoe-is-cpu-family-5-not-6.patch
+x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch
+mmc-slot-gpio-allow-non-sleeping-gpio-ro.patch
+alsa-hda-conexant-add-quirk-for-sws-js201d.patch
+nilfs2-fix-data-corruption-in-dsync-block-recovery-for-small-block-sizes.patch
+nilfs2-fix-hang-in-nilfs_lookup_dirty_data_buffers.patch
--- /dev/null
+From f6a1892585cd19e63c4ef2334e26cd536d5b678d Mon Sep 17 00:00:00 2001
+From: Aleksander Mazur <deweloper@wp.pl>
+Date: Tue, 23 Jan 2024 14:43:00 +0100
+Subject: x86/Kconfig: Transmeta Crusoe is CPU family 5, not 6
+
+From: Aleksander Mazur <deweloper@wp.pl>
+
+commit f6a1892585cd19e63c4ef2334e26cd536d5b678d upstream.
+
+The kernel built with MCRUSOE is unbootable on Transmeta Crusoe. It shows
+the following error message:
+
+ This kernel requires an i686 CPU, but only detected an i586 CPU.
+ Unable to boot - please use a kernel appropriate for your CPU.
+
+Remove MCRUSOE from the condition introduced by the commit in the Fixes tag,
+effectively changing X86_MINIMUM_CPU_FAMILY back to 5 on that machine, which
+matches the CPU family reported by CPUID.
+
+ [ bp: Massage commit message. ]
+
+Fixes: 25d76ac88821 ("x86/Kconfig: Explicitly enumerate i686-class CPUs in Kconfig")
+Signed-off-by: Aleksander Mazur <deweloper@wp.pl>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20240123134309.1117782-1-deweloper@wp.pl
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig.cpu | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -372,7 +372,7 @@ config X86_CMOV
+ config X86_MINIMUM_CPU_FAMILY
+ int
+ default "64" if X86_64
+- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
+ default "5" if X86_32 && X86_CMPXCHG64
+ default "4"
+
--- /dev/null
+From d794734c9bbfe22f86686dc2909c25f5ffe1a572 Mon Sep 17 00:00:00 2001
+From: Steve Wahl <steve.wahl@hpe.com>
+Date: Fri, 26 Jan 2024 10:48:41 -0600
+Subject: x86/mm/ident_map: Use gbpages only where full GB page should be mapped.
+
+From: Steve Wahl <steve.wahl@hpe.com>
+
+commit d794734c9bbfe22f86686dc2909c25f5ffe1a572 upstream.
+
+When ident_pud_init() uses only gbpages to create identity maps, large
+ranges of addresses not actually requested can be included in the
+resulting table; a 4K request will map a full GB. On UV systems, this
+ends up including regions that will cause hardware to halt the system
+if accessed (these are marked "reserved" by BIOS). Even processor
+speculation into these regions is enough to trigger the system halt.
+
+Only use gbpages when map creation requests include the full GB page
+of space. Fall back to using smaller 2M pages when only portions of a
+GB page are included in the request.
+
+No attempt is made to coalesce mapping requests. If a request requires
+a map entry at the 2M (pmd) level, subsequent mapping requests within
+the same 1G region will also be at the pmd level, even if adjacent or
+overlapping such requests could have been combined to map a full
+gbpage. Existing usage starts with larger regions and then adds
+smaller regions, so this should not have any great consequence.
+
+[ dhansen: fix up comment formatting, simplify changelog ]
+
+Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240126164841.170866-1-steve.wahl%40hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ident_map.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_map
+ for (; addr < end; addr = next) {
+ pud_t *pud = pud_page + pud_index(addr);
+ pmd_t *pmd;
++ bool use_gbpage;
+
+ next = (addr & PUD_MASK) + PUD_SIZE;
+ if (next > end)
+ next = end;
+
+- if (info->direct_gbpages) {
+- pud_t pudval;
++ /* if this is already a gbpage, this portion is already mapped */
++ if (pud_large(*pud))
++ continue;
++
++ /* Is using a gbpage allowed? */
++ use_gbpage = info->direct_gbpages;
+
+- if (pud_present(*pud))
+- continue;
++ /* Don't use gbpage if it maps more than the requested region. */
++ /* at the beginning: */
++ use_gbpage &= ((addr & ~PUD_MASK) == 0);
++ /* ... or at the end: */
++ use_gbpage &= ((next & ~PUD_MASK) == 0);
++
++ /* Never overwrite existing mappings */
++ use_gbpage &= !pud_present(*pud);
++
++ if (use_gbpage) {
++ pud_t pudval;
+
+- addr &= PUD_MASK;
+ pudval = __pud((addr - info->offset) | info->page_flag);
+ set_pud(pud, pudval);
+ continue;