--- /dev/null
+From f71f6ff8c1f682a1cae4e8d7bdeed9d7f76b8f75 Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Fri, 24 Nov 2023 10:50:56 +0200
+Subject: bus: ti-sysc: Flush posted write only after srst_udelay
+
+From: Tony Lindgren <tony@atomide.com>
+
+commit f71f6ff8c1f682a1cae4e8d7bdeed9d7f76b8f75 upstream.
+
+Commit 34539b442b3b ("bus: ti-sysc: Flush posted write on enable before
+reset") caused a regression reproducible on omap4 duovero where the ISS
+target module can produce interconnect errors on boot. It turns out the
+registers are not accessible until after a delay for devices needing
+a ti,sysc-delay-us value.
+
+Let's fix this by flushing the posted write only after the reset delay.
+We also do the flush for devices using ti,sysc-delay-us, as that should
+trigger an interconnect error if the delay is not properly configured.
+
+Let's also add some comments while at it.
+
+Fixes: 34539b442b3b ("bus: ti-sysc: Flush posted write on enable before reset")
+Cc: stable@vger.kernel.org
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/ti-sysc.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+- /* Flush posted write */
++
++ /*
++ * Some devices need a delay before reading registers
++ * after reset. Presumably a srst_udelay is not needed
++ * for devices that use a rstctrl register reset.
++ */
++ if (ddata->cfg.srst_udelay)
++ fsleep(ddata->cfg.srst_udelay);
++
++ /*
++ * Flush posted write. For devices needing srst_udelay
++ * this should trigger an interconnect error if the
++ * srst_udelay value is needed but not configured.
++ */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+- if (ddata->cfg.srst_udelay)
+- fsleep(ddata->cfg.srst_udelay);
+-
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
+
--- /dev/null
+From b86f4b790c998afdbc88fe1aa55cfe89c4068726 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 5 Dec 2023 16:39:16 +0100
+Subject: dm-integrity: don't modify bio's immutable bio_vec in integrity_metadata()
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit b86f4b790c998afdbc88fe1aa55cfe89c4068726 upstream.
+
+__bio_for_each_segment assumes that the first struct bio_vec argument
+doesn't change - it calls "bio_advance_iter_single((bio), &(iter),
+(bvl).bv_len)" to advance the iterator. Unfortunately, the dm-integrity
+code changes the bio_vec with "bv.bv_len -= pos". When this code path
+is taken, the iterator would be out of sync and dm-integrity would
+report errors. This happens if the machine is out of memory and
+"kmalloc" fails.
+
+Fix this bug by making a copy of "bv" and changing the copy instead.
+
+Fixes: 7eada909bfd7 ("dm: add integrity target")
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-integrity.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1765,11 +1765,12 @@ static void integrity_metadata(struct wo
+ sectors_to_process = dio->range.n_sectors;
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++ struct bio_vec bv_copy = bv;
+ unsigned int pos;
+ char *mem, *checksums_ptr;
+
+ again:
+- mem = bvec_kmap_local(&bv);
++ mem = bvec_kmap_local(&bv_copy);
+ pos = 0;
+ checksums_ptr = checksums;
+ do {
+@@ -1778,7 +1779,7 @@ again:
+ sectors_to_process -= ic->sectors_per_block;
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+- } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
++ } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
+ kunmap_local(mem);
+
+ r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+@@ -1803,9 +1804,9 @@ again:
+ if (!sectors_to_process)
+ break;
+
+- if (unlikely(pos < bv.bv_len)) {
+- bv.bv_offset += pos;
+- bv.bv_len -= pos;
++ if (unlikely(pos < bv_copy.bv_len)) {
++ bv_copy.bv_offset += pos;
++ bv_copy.bv_len -= pos;
+ goto again;
+ }
+ }
--- /dev/null
+From 1cc3542c76acb5f59001e3e562eba672f1983355 Mon Sep 17 00:00:00 2001
+From: xiongxin <xiongxin@kylinos.cn>
+Date: Wed, 20 Dec 2023 10:29:01 +0800
+Subject: gpio: dwapb: mask/unmask IRQ when disable/enale it
+
+From: xiongxin <xiongxin@kylinos.cn>
+
+commit 1cc3542c76acb5f59001e3e562eba672f1983355 upstream.
+
+On hardware where the I2C HID driver's interrupt is provided by the
+DesignWare GPIO IRQ chip, if the user keeps using the I2C HID device
+during the suspend process, the I2C HID interrupt ends up masked after
+the resume process is finished.
+
+This is because disable_irq()/enable_irq() in the DesignWare GPIO
+driver do not synchronize the IRQ mask register state. In normal use
+of the I2C HID device, the GPIO IRQ irq_mask()/irq_unmask() functions
+are called in pairs. In the exceptional case, i2c_hid_core_suspend()
+calls disable_irq() to disable the GPIO IRQ. With low probability, this
+causes irq_unmask() to not be called, which leaves the GPIO IRQ masked
+and not unmasked by enable_irq(), causing the failure described above.
+
+Add synchronization of the mask register state in the
+dwapb_irq_enable()/dwapb_irq_disable() functions: mask the GPIO IRQ
+before disabling it, and unmask the IRQ after enabling it.
+
+Fixes: 7779b3455697 ("gpio: add a driver for the Synopsys DesignWare APB GPIO block")
+Cc: stable@kernel.org
+Co-developed-by: Riwen Lu <luriwen@kylinos.cn>
+Signed-off-by: Riwen Lu <luriwen@kylinos.cn>
+Signed-off-by: xiongxin <xiongxin@kylinos.cn>
+Acked-by: Serge Semin <fancer.lancer@gmail.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-dwapb.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpio/gpio-dwapb.c
++++ b/drivers/gpio/gpio-dwapb.c
+@@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val |= BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
++ val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+
+@@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val &= ~BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
++ val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
--- /dev/null
+From d26b9cb33c2d1ba68d1f26bb06c40300f16a3799 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 7 Dec 2023 15:11:58 +0000
+Subject: KVM: arm64: vgic: Add a non-locking primitive for kvm_vgic_vcpu_destroy()
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit d26b9cb33c2d1ba68d1f26bb06c40300f16a3799 upstream.
+
+As we are going to need to call into kvm_vgic_vcpu_destroy() without
+holding the slots_lock beforehand, introduce __kvm_vgic_vcpu_destroy()
+as a non-locking primitive of kvm_vgic_vcpu_destroy().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20231207151201.3028710-3-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-init.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct
+ vgic_v4_teardown(kvm);
+ }
+
+-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+@@ -382,6 +382,15 @@ void kvm_vgic_vcpu_destroy(struct kvm_vc
+ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ }
+
++void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++{
++ struct kvm *kvm = vcpu->kvm;
++
++ mutex_lock(&kvm->slots_lock);
++ __kvm_vgic_vcpu_destroy(vcpu);
++ mutex_unlock(&kvm->slots_lock);
++}
++
+ void kvm_vgic_destroy(struct kvm *kvm)
+ {
+ struct kvm_vcpu *vcpu;
+@@ -392,7 +401,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
+ vgic_debug_destroy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+- kvm_vgic_vcpu_destroy(vcpu);
++ __kvm_vgic_vcpu_destroy(vcpu);
+
+ mutex_lock(&kvm->arch.config_lock);
+
--- /dev/null
+From 02e3858f08faabab9503ae2911cf7c7e27702257 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 7 Dec 2023 15:11:59 +0000
+Subject: KVM: arm64: vgic: Force vcpu vgic teardown on vcpu destroy
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 02e3858f08faabab9503ae2911cf7c7e27702257 upstream.
+
+When failing to create a vcpu because (for example) it has a
+duplicate vcpu_id, we destroy the vcpu. Amusingly, this leaves
+the redistributor registered with the KVM_MMIO bus.
+
+This is no good, and we should properly clean up the mess. Force
+a teardown of the vgic vcpu interface, including the RD device,
+before returning to the caller.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20231207151201.3028710-4-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/arm.c | 2 +-
+ arch/arm64/kvm/vgic/vgic-init.c | 5 ++++-
+ arch/arm64/kvm/vgic/vgic-mmio-v3.c | 2 +-
+ arch/arm64/kvm/vgic/vgic.h | 1 +
+ 4 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -407,7 +407,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vc
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_timer_vcpu_terminate(vcpu);
+ kvm_pmu_vcpu_destroy(vcpu);
+-
++ kvm_vgic_vcpu_destroy(vcpu);
+ kvm_arm_vcpu_destroy(vcpu);
+ }
+
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -379,7 +379,10 @@ static void __kvm_vgic_vcpu_destroy(stru
+ vgic_flush_pending_lpis(vcpu);
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+- vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
++ vgic_unregister_redist_iodev(vcpu);
++ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ }
+ }
+
+ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -820,7 +820,7 @@ out_unlock:
+ return ret;
+ }
+
+-static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(stru
+ int vgic_v3_save_pending_tables(struct kvm *kvm);
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
+ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
--- /dev/null
+From 01ad29d224ff73bc4e16e0ef9ece17f28598c4a4 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 7 Dec 2023 15:11:57 +0000
+Subject: KVM: arm64: vgic: Simplify kvm_vgic_destroy()
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 01ad29d224ff73bc4e16e0ef9ece17f28598c4a4 upstream.
+
+When destroying a vgic, we have rather cumbersome rules about
+when slots_lock and config_lock are held, resulting in fun
+buglets.
+
+The first port of call is to simplify kvm_vgic_map_resources()
+so that there is only one call to kvm_vgic_destroy() instead of
+two, with the second only holding half of the locks.
+
+For that, we kill the non-locking primitive and move the call
+outside of the locking altogether. This doesn't change anything
+(we re-acquire the locks and tear down the whole vgic), and
+simplifies the code significantly.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20231207151201.3028710-2-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-init.c | 29 ++++++++++++++---------------
+ 1 file changed, 14 insertions(+), 15 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -382,26 +382,24 @@ void kvm_vgic_vcpu_destroy(struct kvm_vc
+ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ }
+
+-static void __kvm_vgic_destroy(struct kvm *kvm)
++void kvm_vgic_destroy(struct kvm *kvm)
+ {
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+- lockdep_assert_held(&kvm->arch.config_lock);
++ mutex_lock(&kvm->slots_lock);
+
+ vgic_debug_destroy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vgic_vcpu_destroy(vcpu);
+
++ mutex_lock(&kvm->arch.config_lock);
++
+ kvm_vgic_dist_destroy(kvm);
+-}
+
+-void kvm_vgic_destroy(struct kvm *kvm)
+-{
+- mutex_lock(&kvm->arch.config_lock);
+- __kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->arch.config_lock);
++ mutex_unlock(&kvm->slots_lock);
+ }
+
+ /**
+@@ -469,25 +467,26 @@ int kvm_vgic_map_resources(struct kvm *k
+ type = VGIC_V3;
+ }
+
+- if (ret) {
+- __kvm_vgic_destroy(kvm);
++ if (ret)
+ goto out;
+- }
++
+ dist->ready = true;
+ dist_base = dist->vgic_dist_base;
+ mutex_unlock(&kvm->arch.config_lock);
+
+ ret = vgic_register_dist_iodev(kvm, dist_base, type);
+- if (ret) {
++ if (ret)
+ kvm_err("Unable to register VGIC dist MMIO regions\n");
+- kvm_vgic_destroy(kvm);
+- }
+- mutex_unlock(&kvm->slots_lock);
+- return ret;
+
++ goto out_slots;
+ out:
+ mutex_unlock(&kvm->arch.config_lock);
++out_slots:
+ mutex_unlock(&kvm->slots_lock);
++
++ if (ret)
++ kvm_vgic_destroy(kvm);
++
+ return ret;
+ }
+
--- /dev/null
+From 5c47251e8c4903111608ddcba2a77c0c425c247c Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 14 Nov 2023 16:26:55 +0100
+Subject: lib/vsprintf: Fix %pfwf when current node refcount == 0
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit 5c47251e8c4903111608ddcba2a77c0c425c247c upstream.
+
+A refcount issue can appear in __fwnode_link_del() due to the
+pr_debug() call:
+ WARNING: CPU: 0 PID: 901 at lib/refcount.c:25 refcount_warn_saturate+0xe5/0x110
+ Call Trace:
+ <TASK>
+ ...
+ of_node_get+0x1e/0x30
+ of_fwnode_get+0x28/0x40
+ fwnode_full_name_string+0x34/0x90
+ fwnode_string+0xdb/0x140
+ ...
+ vsnprintf+0x17b/0x630
+ ...
+ __fwnode_link_del+0x25/0xa0
+ fwnode_links_purge+0x39/0xb0
+ of_node_release+0xd9/0x180
+ ...
+
+Indeed, an fwnode (of_node) is being destroyed and so of_node_release()
+is called because the of_node refcount reached 0.
+From of_node_release(), several function calls are made that lead to
+pr_debug() calls with %pfwf to print the fwnode full name.
+The issue is not present if we change %pfwf to %pfwP.
+
+To print the full name, %pfwf iterates over the current node and its
+parents and obtains/drops a reference to all nodes involved.
+
+In order to allow printing the full name (%pfwf) of a node while it is
+being destroyed, do not obtain/drop a reference to this current node.
+
+Fixes: a92eb7621b9f ("lib/vsprintf: Make use of fwnode API to obtain node names and separators")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20231114152655.409331-1-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/vsprintf.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2110,15 +2110,20 @@ char *fwnode_full_name_string(struct fwn
+
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+- struct fwnode_handle *__fwnode =
+- fwnode_get_nth_parent(fwnode, depth);
++ /*
++ * Only get a reference for other nodes (i.e. parent nodes).
++ * fwnode refcount may be 0 here.
++ */
++ struct fwnode_handle *__fwnode = depth ?
++ fwnode_get_nth_parent(fwnode, depth) : fwnode;
+
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
+ default_str_spec);
+
+- fwnode_handle_put(__fwnode);
++ if (depth)
++ fwnode_handle_put(__fwnode);
+ }
+
+ return buf;
--- /dev/null
+From 1e37bf84afacd5ba17b7a13a18ca2bc78aff05c0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Fri, 15 Dec 2023 11:13:58 +0000
+Subject: nvmem: brcm_nvram: store a copy of NVRAM content
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+commit 1e37bf84afacd5ba17b7a13a18ca2bc78aff05c0 upstream.
+
+This driver uses MMIO access for reading NVRAM from a flash device.
+Underneath there is a flash controller that reads data and provides
+a mapping window.
+
+Using the MMIO interface affects the controller configuration and may
+break the real controller driver. This was reported by multiple users
+of devices with NVRAM stored on NAND.
+
+Modify the driver to read & cache the NVRAM content during init and use
+that copy to provide NVMEM data when requested. On NAND flashes, due to
+their alignment, NVRAM partitions can be quite big (1 MiB and more)
+while the actual NVRAM content stays quite small (usually 16 to 32 KiB).
+To avoid allocating that much memory, check for the actual data length.
+
+Link: https://lore.kernel.org/linux-mtd/CACna6rwf3_9QVjYcM+847biTX=K0EoWXuXcSMkJO1Vy_5vmVqA@mail.gmail.com/
+Fixes: 3fef9ed0627a ("nvmem: brcm_nvram: new driver exposing Broadcom's NVRAM")
+Cc: <Stable@vger.kernel.org>
+Cc: Arınç ÜNAL <arinc.unal@arinc9.com>
+Cc: Florian Fainelli <florian.fainelli@broadcom.com>
+Cc: Scott Branden <scott.branden@broadcom.com>
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Acked-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20231215111358.316727-3-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvmem/brcm_nvram.c | 134 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 94 insertions(+), 40 deletions(-)
+
+--- a/drivers/nvmem/brcm_nvram.c
++++ b/drivers/nvmem/brcm_nvram.c
+@@ -17,9 +17,23 @@
+
+ #define NVRAM_MAGIC "FLSH"
+
++/**
++ * struct brcm_nvram - driver state internal struct
++ *
++ * @dev: NVMEM device pointer
++ * @nvmem_size: Size of the whole space available for NVRAM
++ * @data: NVRAM data copy stored to avoid poking underlaying flash controller
++ * @data_len: NVRAM data size
++ * @padding_byte: Padding value used to fill remaining space
++ * @cells: Array of discovered NVMEM cells
++ * @ncells: Number of elements in cells
++ */
+ struct brcm_nvram {
+ struct device *dev;
+- void __iomem *base;
++ size_t nvmem_size;
++ uint8_t *data;
++ size_t data_len;
++ uint8_t padding_byte;
+ struct nvmem_cell_info *cells;
+ int ncells;
+ };
+@@ -36,10 +50,47 @@ static int brcm_nvram_read(void *context
+ size_t bytes)
+ {
+ struct brcm_nvram *priv = context;
+- u8 *dst = val;
++ size_t to_copy;
++
++ if (offset + bytes > priv->data_len)
++ to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0);
++ else
++ to_copy = bytes;
++
++ memcpy(val, priv->data + offset, to_copy);
++
++ memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy);
++
++ return 0;
++}
++
++static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev)
++{
++ struct resource *res;
++ void __iomem *base;
++
++ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ priv->nvmem_size = resource_size(res);
++
++ priv->padding_byte = readb(base + priv->nvmem_size - 1);
++ for (priv->data_len = priv->nvmem_size;
++ priv->data_len;
++ priv->data_len--) {
++ if (readb(base + priv->data_len - 1) != priv->padding_byte)
++ break;
++ }
++ WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len);
+
+- while (bytes--)
+- *dst++ = readb(priv->base + offset++);
++ priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL);
++ if (!priv->data)
++ return -ENOMEM;
++
++ memcpy_fromio(priv->data, base, priv->data_len);
++
++ bcm47xx_nvram_init_from_iomem(base, priv->data_len);
+
+ return 0;
+ }
+@@ -67,8 +118,13 @@ static int brcm_nvram_add_cells(struct b
+ size_t len)
+ {
+ struct device *dev = priv->dev;
+- char *var, *value, *eq;
++ char *var, *value;
++ uint8_t tmp;
+ int idx;
++ int err = 0;
++
++ tmp = priv->data[len - 1];
++ priv->data[len - 1] = '\0';
+
+ priv->ncells = 0;
+ for (var = data + sizeof(struct brcm_nvram_header);
+@@ -78,67 +134,68 @@ static int brcm_nvram_add_cells(struct b
+ }
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+- if (!priv->cells)
+- return -ENOMEM;
++ if (!priv->cells) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
+ var < (char *)data + len && *var;
+ var = value + strlen(value) + 1, idx++) {
++ char *eq, *name;
++
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
++ name = devm_kstrdup(dev, var, GFP_KERNEL);
++ *eq = '=';
++ if (!name) {
++ err = -ENOMEM;
++ goto out;
++ }
+ value = eq + 1;
+
+- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+- if (!priv->cells[idx].name)
+- return -ENOMEM;
++ priv->cells[idx].name = name;
+ priv->cells[idx].offset = value - (char *)data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+- if (!strcmp(var, "et0macaddr") ||
+- !strcmp(var, "et1macaddr") ||
+- !strcmp(var, "et2macaddr")) {
++ if (!strcmp(name, "et0macaddr") ||
++ !strcmp(name, "et1macaddr") ||
++ !strcmp(name, "et2macaddr")) {
+ priv->cells[idx].raw_len = strlen(value);
+ priv->cells[idx].bytes = ETH_ALEN;
+ priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
+ }
+ }
+
+- return 0;
++out:
++ priv->data[len - 1] = tmp;
++ return err;
+ }
+
+ static int brcm_nvram_parse(struct brcm_nvram *priv)
+ {
++ struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data;
+ struct device *dev = priv->dev;
+- struct brcm_nvram_header header;
+- uint8_t *data;
+ size_t len;
+ int err;
+
+- memcpy_fromio(&header, priv->base, sizeof(header));
+-
+- if (memcmp(header.magic, NVRAM_MAGIC, 4)) {
++ if (memcmp(header->magic, NVRAM_MAGIC, 4)) {
+ dev_err(dev, "Invalid NVRAM magic\n");
+ return -EINVAL;
+ }
+
+- len = le32_to_cpu(header.len);
+-
+- data = kzalloc(len, GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
+-
+- memcpy_fromio(data, priv->base, len);
+- data[len - 1] = '\0';
+-
+- err = brcm_nvram_add_cells(priv, data, len);
+- if (err) {
+- dev_err(dev, "Failed to add cells: %d\n", err);
+- return err;
++ len = le32_to_cpu(header->len);
++ if (len > priv->nvmem_size) {
++ dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len,
++ priv->nvmem_size);
++ return -EINVAL;
+ }
+
+- kfree(data);
++ err = brcm_nvram_add_cells(priv, priv->data, len);
++ if (err)
++ dev_err(dev, "Failed to add cells: %d\n", err);
+
+ return 0;
+ }
+@@ -150,7 +207,6 @@ static int brcm_nvram_probe(struct platf
+ .reg_read = brcm_nvram_read,
+ };
+ struct device *dev = &pdev->dev;
+- struct resource *res;
+ struct brcm_nvram *priv;
+ int err;
+
+@@ -159,21 +215,19 @@ static int brcm_nvram_probe(struct platf
+ return -ENOMEM;
+ priv->dev = dev;
+
+- priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+- if (IS_ERR(priv->base))
+- return PTR_ERR(priv->base);
++ err = brcm_nvram_copy_data(priv, pdev);
++ if (err)
++ return err;
+
+ err = brcm_nvram_parse(priv);
+ if (err)
+ return err;
+
+- bcm47xx_nvram_init_from_iomem(priv->base, resource_size(res));
+-
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+- config.size = resource_size(res);
++ config.size = priv->nvmem_size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+ }
--- /dev/null
+From 5c584f175d32f9cc66c909f851cd905da58b39ea Mon Sep 17 00:00:00 2001
+From: Nam Cao <namcao@linutronix.de>
+Date: Fri, 1 Dec 2023 10:23:29 +0100
+Subject: pinctrl: starfive: jh7100: ignore disabled device tree nodes
+
+From: Nam Cao <namcao@linutronix.de>
+
+commit 5c584f175d32f9cc66c909f851cd905da58b39ea upstream.
+
+The driver always registers the pin configurations from the device
+tree. This can cause some inconvenience to users, as pin configurations
+in the base device tree cannot be disabled in the device tree overlay,
+even when the relevant devices are not used.
+
+Ignore disabled pin configuration nodes in device tree.
+
+Fixes: ec648f6b7686 ("pinctrl: starfive: Add pinctrl driver for StarFive SoCs")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/r/fe4c15dcc3074412326b8dc296b0cbccf79c49bf.1701422582.git.namcao@linutronix.de
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+@@ -492,7 +492,7 @@ static int starfive_dt_node_to_map(struc
+
+ nmaps = 0;
+ ngroups = 0;
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npinmux = of_property_count_u32_elems(child, "pinmux");
+ int npins = of_property_count_u32_elems(child, "pins");
+
+@@ -527,7 +527,7 @@ static int starfive_dt_node_to_map(struc
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npins;
+ int i;
+
--- /dev/null
+From f6e3b40a2c89c1d832ed9cb031dc9825bbf43b7c Mon Sep 17 00:00:00 2001
+From: Nam Cao <namcao@linutronix.de>
+Date: Fri, 1 Dec 2023 10:23:28 +0100
+Subject: pinctrl: starfive: jh7110: ignore disabled device tree nodes
+
+From: Nam Cao <namcao@linutronix.de>
+
+commit f6e3b40a2c89c1d832ed9cb031dc9825bbf43b7c upstream.
+
+The driver always registers the pin configurations from the device
+tree. This can cause some inconvenience to users, as pin configurations
+in the base device tree cannot be disabled in the device tree overlay,
+even when the relevant devices are not used.
+
+Ignore disabled pin configuration nodes in device tree.
+
+Fixes: 447976ab62c5 ("pinctrl: starfive: Add StarFive JH7110 sys controller driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/r/fd8bf044799ae50a6291ae150ef87b4f1923cacb.1701422582.git.namcao@linutronix.de
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+@@ -135,7 +135,7 @@ static int jh7110_dt_node_to_map(struct
+ int ret;
+
+ ngroups = 0;
+- for_each_child_of_node(np, child)
++ for_each_available_child_of_node(np, child)
+ ngroups += 1;
+ nmaps = 2 * ngroups;
+
+@@ -150,7 +150,7 @@ static int jh7110_dt_node_to_map(struct
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ int *pins;
+ u32 *pinmux;
--- /dev/null
+From c5becf57dd5659c687d41d623a69f42d63f59eb2 Mon Sep 17 00:00:00 2001
+From: "Martin K. Petersen" <martin.petersen@oracle.com>
+Date: Fri, 8 Dec 2023 12:09:38 -0500
+Subject: Revert "scsi: aacraid: Reply queue mapping to CPUs based on IRQ affinity"
+
+From: Martin K. Petersen <martin.petersen@oracle.com>
+
+commit c5becf57dd5659c687d41d623a69f42d63f59eb2 upstream.
+
+This reverts commit 9dc704dcc09eae7d21b5da0615eb2ed79278f63e.
+
+Several reports have been made indicating that this commit caused
+hangs. Numerous attempts at root-causing and fixing the issue have
+been unsuccessful, so let's revert for now.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217599
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/aacraid/aacraid.h | 1 -
+ drivers/scsi/aacraid/commsup.c | 6 +-----
+ drivers/scsi/aacraid/linit.c | 14 --------------
+ drivers/scsi/aacraid/src.c | 25 ++-----------------------
+ 4 files changed, 3 insertions(+), 43 deletions(-)
+
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -1678,7 +1678,6 @@ struct aac_dev
+ u32 handle_pci_error;
+ bool init_reset;
+ u8 soft_reset_support;
+- u8 use_map_queue;
+ };
+
+ #define aac_adapter_interrupt(dev) \
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
+ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+ {
+ struct fib *fibptr;
+- u32 blk_tag;
+- int i;
+
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- i = blk_mq_unique_tag_to_tag(blk_tag);
+- fibptr = &dev->fibs[i];
++ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -19,7 +19,6 @@
+
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/completion.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -505,15 +504,6 @@ common_config:
+ return 0;
+ }
+
+-static void aac_map_queues(struct Scsi_Host *shost)
+-{
+- struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+-
+- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+- aac->pdev, 0);
+- aac->use_map_queue = true;
+-}
+-
+ /**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+@@ -1498,7 +1488,6 @@ static const struct scsi_host_template a
+ .bios_param = aac_biosparm,
+ .shost_groups = aac_host_groups,
+ .slave_configure = aac_slave_configure,
+- .map_queues = aac_map_queues,
+ .change_queue_depth = aac_change_queue_depth,
+ .sdev_groups = aac_dev_groups,
+ .eh_abort_handler = aac_eh_abort,
+@@ -1786,8 +1775,6 @@ static int aac_probe_one(struct pci_dev
+ shost->max_lun = AAC_MAX_LUN;
+
+ pci_set_drvdata(pdev, shost);
+- shost->nr_hw_queues = aac->max_msix;
+- shost->host_tagset = 1;
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+@@ -1919,7 +1906,6 @@ static void aac_remove_one(struct pci_de
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ aac_cancel_rescan_worker(aac);
+- aac->use_map_queue = false;
+ scsi_remove_host(shost);
+
+ __aac_shutdown(aac);
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struc
+ #endif
+
+ u16 vector_no;
+- struct scsi_cmnd *scmd;
+- u32 blk_tag;
+- struct Scsi_Host *shost = dev->scsi_host_ptr;
+- struct blk_mq_queue_map *qmap;
+
+ atomic_inc(&q->numpending);
+
+@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struc
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ && dev->sa_firmware)
+ vector_no = aac_get_vector(dev);
+- else {
+- if (!fib->vector_no || !fib->callback_data) {
+- if (shost && dev->use_map_queue) {
+- qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+- vector_no = qmap->mq_map[raw_smp_processor_id()];
+- }
+- /*
+- * We hardcode the vector_no for
+- * reserved commands as a valid shost is
+- * absent during the init
+- */
+- else
+- vector_no = 0;
+- } else {
+- scmd = (struct scsi_cmnd *)fib->callback_data;
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
+- }
+- }
++ else
++ vector_no = fib->vector_no;
+
+ if (native_hba) {
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
--- /dev/null
+From 066c5b46b6eaf2f13f80c19500dbb3b84baabb33 Mon Sep 17 00:00:00 2001
+From: Alexander Atanasov <alexander.atanasov@virtuozzo.com>
+Date: Fri, 15 Dec 2023 14:10:08 +0200
+Subject: scsi: core: Always send batch on reset or error handling command
+
+From: Alexander Atanasov <alexander.atanasov@virtuozzo.com>
+
+commit 066c5b46b6eaf2f13f80c19500dbb3b84baabb33 upstream.
+
+In commit 8930a6c20791 ("scsi: core: add support for request batching") the
+block layer bd->last flag was mapped to SCMD_LAST and used as an indicator
+to send the batch for the drivers that implement this feature. However, the
+error handling code was not updated accordingly.
+
+scsi_send_eh_cmnd() is used to send error handling commands and request
+sense. The problem is that request sense comes as a single command that
+gets into the batch queue and times out. As a result the device goes
+offline after several failed resets. This was observed on virtio_scsi
+during a device resize operation.
+
+[ 496.316946] sd 0:0:4:0: [sdd] tag#117 scsi_eh_0: requesting sense
+[ 506.786356] sd 0:0:4:0: [sdd] tag#117 scsi_send_eh_cmnd timeleft: 0
+[ 506.787981] sd 0:0:4:0: [sdd] tag#117 abort
+
+To fix this always set SCMD_LAST flag in scsi_send_eh_cmnd() and
+scsi_reset_ioctl().
+
+Fixes: 8930a6c20791 ("scsi: core: add support for request batching")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Alexander Atanasov <alexander.atanasov@virtuozzo.com>
+Link: https://lore.kernel.org/r/20231215121008.2881653-1-alexander.atanasov@virtuozzo.com
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/scsi_error.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1152,6 +1152,7 @@ retry:
+
+ scsi_log_send(scmd);
+ scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
++ scmd->flags |= SCMD_LAST;
+
+ /*
+ * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
+@@ -2459,6 +2460,7 @@ scsi_ioctl_reset(struct scsi_device *dev
+ scsi_init_command(dev, scmd);
+
+ scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
++ scmd->flags |= SCMD_LAST;
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+
+ scmd->cmd_len = 0;
--- /dev/null
+From c8f021eec5817601dbd25ab7e3ad5c720965c688 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@linux.dev>
+Date: Fri, 15 Dec 2023 17:04:24 +0100
+Subject: selftests: mptcp: join: fix subflow_send_ack lookup
+
+From: Geliang Tang <geliang.tang@linux.dev>
+
+commit c8f021eec5817601dbd25ab7e3ad5c720965c688 upstream.
+
+MPC backup tests will sometimes skip unexpectedly (for example, when
+compiling the kernel with an older version of gcc, such as gcc-8), since
+static functions like mptcp_subflow_send_ack are also listed in
+/proc/kallsyms, with a 't' in front of them, not 'T' ('T' is for a
+global function):
+
+ > grep "mptcp_subflow_send_ack" /proc/kallsyms
+
+ 0000000000000000 T __pfx___mptcp_subflow_send_ack
+ 0000000000000000 T __mptcp_subflow_send_ack
+ 0000000000000000 t __pfx_mptcp_subflow_send_ack
+ 0000000000000000 t mptcp_subflow_send_ack
+
+In this case, mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"
+will be false and the MPC backup tests will skip. This is not what we
+expect.
+
+The correct logic here should be: if mptcp_subflow_send_ack is not a
+global function in /proc/kallsyms, do these MPC backup tests. So a 'T'
+must be added in front of mptcp_subflow_send_ack.
+
+Fixes: 632978f0a961 ("selftests: mptcp: join: skip MPC backups tests if not supported")
+Cc: stable@vger.kernel.org
+Signed-off-by: Geliang Tang <geliang.tang@linux.dev>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2773,7 +2773,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+@@ -2782,7 +2782,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ speed=slow \
+@@ -2792,7 +2792,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+@@ -2801,7 +2801,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ sflags=backup speed=slow \
ring-buffer-fix-32-bit-rb_time_read-race-with-rb_tim.patch
ring-buffer-remove-useless-update-to-write_stamp-in-.patch
ring-buffer-fix-slowpath-of-interrupted-event.patch
+spi-atmel-do-not-cancel-a-transfer-upon-any-signal.patch
+spi-atmel-prevent-spi-transfers-from-being-killed.patch
+spi-atmel-fix-clock-issue-when-using-devices-with-different-polarities.patch
+nvmem-brcm_nvram-store-a-copy-of-nvram-content.patch
+revert-scsi-aacraid-reply-queue-mapping-to-cpus-based-on-irq-affinity.patch
+scsi-core-always-send-batch-on-reset-or-error-handling-command.patch
+tracing-synthetic-disable-events-after-testing-in-synth_event_gen_test_init.patch
+dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch
+selftests-mptcp-join-fix-subflow_send_ack-lookup.patch
+pinctrl-starfive-jh7110-ignore-disabled-device-tree-nodes.patch
+pinctrl-starfive-jh7100-ignore-disabled-device-tree-nodes.patch
+bus-ti-sysc-flush-posted-write-only-after-srst_udelay.patch
+gpio-dwapb-mask-unmask-irq-when-disable-enale-it.patch
+lib-vsprintf-fix-pfwf-when-current-node-refcount-0.patch
+thunderbolt-fix-memory-leak-in-margining_port_remove.patch
+kvm-arm64-vgic-simplify-kvm_vgic_destroy.patch
+kvm-arm64-vgic-add-a-non-locking-primitive-for-kvm_vgic_vcpu_destroy.patch
+kvm-arm64-vgic-force-vcpu-vgic-teardown-on-vcpu-destroy.patch
+x86-alternatives-sync-core-before-enabling-interrupts.patch
+x86-alternatives-disable-interrupts-and-sync-when-optimizing-nops-in-place.patch
+x86-smpboot-64-handle-x2apic-bios-inconsistency-gracefully.patch
--- /dev/null
+From 1ca2761a7734928ffe0678f88789266cf3d05362 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Mon, 27 Nov 2023 10:58:41 +0100
+Subject: spi: atmel: Do not cancel a transfer upon any signal
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 1ca2761a7734928ffe0678f88789266cf3d05362 upstream.
+
+The intended move from wait_for_completion_*() to
+wait_for_completion_interruptible_*() was to allow (very) long spi memory
+transfers to be stopped upon user request instead of freezing the
+machine forever as the timeout value could now be significantly bigger.
+
+However, depending on the user logic, applications can receive many
+signals for their own "internal" purposes which have nothing to do with
+the requested kernel operations, hence interrupting spi transfers upon
+any signal is probably not a wise choice. Instead, let's switch to
+wait_for_completion_killable_*() to only catch the "important"
+signals. This was likely the intended behavior anyway.
+
+Fixes: e0205d6203c2 ("spi: atmel: Prevent false timeouts on long transfers")
+Cc: stable@vger.kernel.org
+Reported-by: Ronald Wahl <ronald.wahl@raritan.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/r/20231127095842.389631-1-miquel.raynal@bootlin.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-atmel.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 6aa8adbe4170..2e8860865af9 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1336,8 +1336,8 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
+ }
+
+ dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
+- ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
+- dma_timeout);
++ ret_timeout = wait_for_completion_killable_timeout(&as->xfer_completion,
++ dma_timeout);
+ if (ret_timeout <= 0) {
+ dev_err(&spi->dev, "spi transfer %s\n",
+ !ret_timeout ? "timeout" : "canceled");
+--
+2.43.0
+
--- /dev/null
+From fc70d643a2f6678cbe0f5c86433c1aeb4d613fcc Mon Sep 17 00:00:00 2001
+From: Louis Chauvet <louis.chauvet@bootlin.com>
+Date: Mon, 4 Dec 2023 16:49:03 +0100
+Subject: spi: atmel: Fix clock issue when using devices with different polarities
+
+From: Louis Chauvet <louis.chauvet@bootlin.com>
+
+commit fc70d643a2f6678cbe0f5c86433c1aeb4d613fcc upstream.
+
+The current Atmel SPI controller driver (v2) behaves incorrectly when
+using two SPI devices with different clock polarities and GPIO CS.
+
+When switching from one device to another, the controller driver first
+enables the CS and then applies whatever configuration suits the targeted
+device (typically, the polarities). The side effect of this ordering is
+the appearance of a spurious clock edge after enabling the CS when the
+clock polarity needs to be inverted wrt. the previous configuration of
+the controller.
+
+This parasitic clock edge is problematic when the SPI device uses that edge
+for internal processing, which is perfectly legitimate given that its CS
+was asserted. Indeed, devices such as HVS8080 driven by driver gpio-sr in
+the kernel are shift registers and will process this first clock edge to
+perform a first register shift. In this case, the first bit gets lost and
+the whole data block that will later be read by the kernel is all shifted
+by one.
+
+ Current behavior:
+ The actual switching of the clock polarity only occurs after the CS
+ when the controller sends the first message:
+
+ CLK ------------\ /-\ /-\
+ | | | | | . . .
+ \---/ \-/ \
+ CS -----\
+ |
+ \------------------
+
+ ^ ^ ^
+ | | |
+ | | Actual clock of the message sent
+ | |
+ | Change of clock polarity, which occurs with the first
+ | write to the bus. This edge occurs when the CS is
+ | already asserted, and can be interpreted as
+ | the first clock edge by the receiver.
+ |
+ GPIO CS toggle
+
+This issue is specific to this controller because while the SPI core
+performs the operations in the right order, the controller however does
+not. In practice, the controller only applies the clock configuration right
+before the first transmission.
+
+So this is not a problem when using the controller's dedicated CS, as the
+controller does things correctly, but it becomes a problem when you need to
+change the clock polarity and use an external GPIO for the CS.
+
+One possible approach to solve this problem is to send a dummy message
+before actually activating the CS, so that the controller applies the clock
+polarity beforehand.
+
+New behavior:
+
+CLK ------\ /-\ /-\ /-\ /-\
+ | | | ... | | | | ... | |
+ \------/ \- -/ \------/ \- -/ \------
+
+CS -\/-----------------------\
+ || |
+ \/ \---------------------
+ ^ ^ ^ ^ ^
+ | | | | |
+ | | | | Expected clock cycles when
+ | | | | sending the message
+ | | | |
+ | | | Actual GPIO CS activation, occurs inside
+ | | | the driver
+ | | |
+ | | Dummy message, to trigger clock polarity
+ | | reconfiguration. This message is not received and
+ | | processed by the device because CS is low.
+ | |
+ | Change of clock polarity, forced by the dummy message. This
+ | time, the edge is not detected by the receiver.
+ |
+ This small spike in CS activation is due to the fact that the
+ spi-core activates the CS gpio before calling the driver's
+ set_cs callback, which deactivates this gpio again until the
+ clock polarity is correct.
+
+To avoid having to systematically send a dummy packet, the driver keeps
+track of the clock's current polarity. In this way, it only sends the dummy
+packet when necessary, ensuring that the clock will have the correct
+polarity when the CS is toggled.
+
+There could be two hardware problems with this patch:
+1- The small CS activation spike may confuse some SPI devices.
+2- If, on a design, a single wire is used to select two devices
+depending on its state, the dummy message may disturb them.
+
+Fixes: 5ee36c989831 ("spi: atmel_spi update chipselect handling")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Louis Chauvet <louis.chauvet@bootlin.com>
+Link: https://msgid.link/r/20231204154903.11607-1-louis.chauvet@bootlin.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-atmel.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 81 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -22,6 +22,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/pm_runtime.h>
++#include <linux/iopoll.h>
+ #include <trace/events/spi.h>
+
+ /* SPI register offsets */
+@@ -279,6 +280,7 @@ struct atmel_spi {
+ bool keep_cs;
+
+ u32 fifo_size;
++ bool last_polarity;
+ u8 native_cs_free;
+ u8 native_cs_for_gpio;
+ };
+@@ -292,6 +294,22 @@ struct atmel_spi_device {
+ #define INVALID_DMA_ADDRESS 0xffffffff
+
+ /*
++ * This frequency can be anything supported by the controller, but to avoid
++ * unnecessary delay, the highest possible frequency is chosen.
++ *
++ * This frequency is the highest possible which is not interfering with other
++ * chip select registers (see Note for Serial Clock Bit Rate configuration in
++ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
++ */
++#define DUMMY_MSG_FREQUENCY 0x02
++/*
++ * 8 bits is the minimum data the controller is capable of sending.
++ *
++ * This message can be anything as it should not be treated by any SPI device.
++ */
++#define DUMMY_MSG 0xAA
++
++/*
+ * Version 2 of the SPI controller has
+ * - CR.LASTXFER
+ * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
+@@ -305,6 +323,43 @@ static bool atmel_spi_is_v2(struct atmel
+ }
+
+ /*
++ * Send a dummy message.
++ *
++ * This is sometimes needed when using a CS GPIO to force clock transition when
++ * switching between devices with different polarities.
++ */
++static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
++{
++ u32 status;
++ u32 csr;
++
++ /*
++ * Set a clock frequency to allow sending message on SPI bus.
++ * The frequency here can be anything, but is needed for
++ * the controller to send the data.
++ */
++ csr = spi_readl(as, CSR0 + 4 * chip_select);
++ csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
++ spi_writel(as, CSR0 + 4 * chip_select, csr);
++
++ /*
++ * Read all data coming from SPI bus, needed to be able to send
++ * the message.
++ */
++ spi_readl(as, RDR);
++ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
++ spi_readl(as, RDR);
++ cpu_relax();
++ }
++
++ spi_writel(as, TDR, DUMMY_MSG);
++
++ readl_poll_timeout_atomic(as->regs + SPI_SR, status,
++ (status & SPI_BIT(TXEMPTY)), 1, 1000);
++}
++
++
++/*
+ * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
+ * they assume that spi slave device state will not change on deselect, so
+ * that automagic deselection is OK. ("NPCSx rises if no data is to be
+@@ -320,11 +375,17 @@ static bool atmel_spi_is_v2(struct atmel
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
++ *
++ * When changing the clock polarity, the SPI controller waits for the next
++ * transmission to enforce the default clock state. This may be an issue when
++ * using a GPIO as Chip Select: the clock level is applied only when the first
++ * packet is sent, once the CS has already been asserted. The workaround is to
++ * avoid this by sending a first (dummy) message before toggling the CS state.
+ */
+-
+ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+ {
+ struct atmel_spi_device *asd = spi->controller_state;
++ bool new_polarity;
+ int chip_select;
+ u32 mr;
+
+@@ -353,6 +414,25 @@ static void cs_activate(struct atmel_spi
+ }
+
+ mr = spi_readl(as, MR);
++
++ /*
++ * Ensures the clock polarity is valid before we actually
++ * assert the CS to avoid spurious clock edges to be
++ * processed by the spi devices.
++ */
++ if (spi_get_csgpiod(spi, 0)) {
++ new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
++ if (new_polarity != as->last_polarity) {
++ /*
++ * Need to disable the GPIO before sending the dummy
++ * message because it is already set by the spi core.
++ */
++ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
++ atmel_spi_send_dummy(as, spi, chip_select);
++ as->last_polarity = new_polarity;
++ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
++ }
++ }
+ } else {
+ u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
+ int i;
--- /dev/null
+From 890188d2d7e4ac6c131ba166ca116cb315e752ee Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Tue, 5 Dec 2023 09:31:02 +0100
+Subject: spi: atmel: Prevent spi transfers from being killed
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 890188d2d7e4ac6c131ba166ca116cb315e752ee upstream.
+
+Upstream commit e0205d6203c2 ("spi: atmel: Prevent false timeouts on
+long transfers") has tried to mitigate the problem of getting spi
+transfers canceled because they were lasting too long. On slow buses,
+transfers in the MiB range can take more than one second and thus a
+calculation was added to progressively increment the timeout value. In
+order to not be too problematic from a user point of view (waiting dozen
+of seconds or even minutes), the wait call was turned interruptible.
+
+Turning the wait interruptible was a mistake as what we really wanted to
+do was to be able to kill a transfer. Any signal interrupting our
+transfer would not be suitable at all so a second attempt was made at
+turning the wait killable instead.
+
+Link: https://lore.kernel.org/linux-spi/20231127095842.389631-1-miquel.raynal@bootlin.com/
+
+It was then reported that JFFS2 was showing a splat when interrupting
+a transfer. After some more debate about whether JFFS2 should be fixed
+and how, it was also pointed out that the whole consistency of the
+filesystem in case of parallel I/O would be compromised. Changing JFFS2
+behavior would in theory be possible but nobody has the energy, time
+and knowledge to do this now, so better to prevent spi transfers from
+being interrupted by the user.
+
+Partially revert the blamed commit to no longer use the interruptible
+nor the killable variant of wait_for_completion().
+
+Fixes: e0205d6203c2 ("spi: atmel: Prevent false timeouts on long transfers")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Ronald Wahl <ronald.wahl@raritan.com>
+Link: https://lore.kernel.org/r/20231205083102.16946-1-miquel.raynal@bootlin.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-atmel.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1336,12 +1336,10 @@ static int atmel_spi_one_transfer(struct
+ }
+
+ dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
+- ret_timeout = wait_for_completion_killable_timeout(&as->xfer_completion,
+- dma_timeout);
+- if (ret_timeout <= 0) {
+- dev_err(&spi->dev, "spi transfer %s\n",
+- !ret_timeout ? "timeout" : "canceled");
+- as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
++ ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout);
++ if (!ret_timeout) {
++ dev_err(&spi->dev, "spi transfer timeout\n");
++ as->done_status = -EIO;
+ }
+
+ if (as->done_status)
--- /dev/null
+From ac43c9122e4287bbdbe91e980fc2528acb72cc1e Mon Sep 17 00:00:00 2001
+From: Yaxiong Tian <tianyaxiong@kylinos.cn>
+Date: Wed, 22 Nov 2023 16:02:43 +0800
+Subject: thunderbolt: Fix memory leak in margining_port_remove()
+
+From: Yaxiong Tian <tianyaxiong@kylinos.cn>
+
+commit ac43c9122e4287bbdbe91e980fc2528acb72cc1e upstream.
+
+The dentry returned by debugfs_lookup() needs to be released by calling
+dput(), which is missing in margining_port_remove(). Fix this by calling
+debugfs_lookup_and_remove(), which combines both and avoids the memory
+leak.
+
+Fixes: d0f1e0c2a699 ("thunderbolt: Add support for receiver lane margining")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yaxiong Tian <tianyaxiong@kylinos.cn>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/debugfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -959,7 +959,7 @@ static void margining_port_remove(struct
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ if (parent)
+- debugfs_remove_recursive(debugfs_lookup("margining", parent));
++ debugfs_lookup_and_remove("margining", parent);
+
+ kfree(port->usb4->margining);
+ port->usb4->margining = NULL;
--- /dev/null
+From 88b30c7f5d27e1594d70dc2bd7199b18f2b57fa9 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 20 Dec 2023 11:15:25 -0500
+Subject: tracing / synthetic: Disable events after testing in synth_event_gen_test_init()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 88b30c7f5d27e1594d70dc2bd7199b18f2b57fa9 upstream.
+
+The synth_event_gen_test module can be built in, if someone wants to run
+the tests at boot up and not have to load them.
+
+The synth_event_gen_test_init() function creates and enables the synthetic
+events and runs its tests.
+
+The synth_event_gen_test_exit() disables the events it created and
+destroys the events.
+
+If the module is built in, the events are never disabled. The issue is
+that the events should be disabled after the tests are run. This could
+be an issue if the rest of the boot up tests are enabled, as they expect
+the events to be in a known state before testing. That known state
+happens to be disabled.
+
+When CONFIG_SYNTH_EVENT_GEN_TEST=y and CONFIG_EVENT_TRACE_STARTUP_TEST=y
+a warning will trigger:
+
+ Running tests on trace events:
+ Testing event create_synth_test:
+ Enabled event during self test!
+ ------------[ cut here ]------------
+ WARNING: CPU: 2 PID: 1 at kernel/trace/trace_events.c:4150 event_trace_self_tests+0x1c2/0x480
+ Modules linked in:
+ CPU: 2 PID: 1 Comm: swapper/0 Not tainted 6.7.0-rc2-test-00031-gb803d7c664d5-dirty #276
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
+ RIP: 0010:event_trace_self_tests+0x1c2/0x480
+ Code: bb e8 a2 ab 5d fc 48 8d 7b 48 e8 f9 3d 99 fc 48 8b 73 48 40 f6 c6 01 0f 84 d6 fe ff ff 48 c7 c7 20 b6 ad bb e8 7f ab 5d fc 90 <0f> 0b 90 48 89 df e8 d3 3d 99 fc 48 8b 1b 4c 39 f3 0f 85 2c ff ff
+ RSP: 0000:ffffc9000001fdc0 EFLAGS: 00010246
+ RAX: 0000000000000029 RBX: ffff88810399ca80 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: ffffffffb9f19478 RDI: ffff88823c734e64
+ RBP: ffff88810399f300 R08: 0000000000000000 R09: fffffbfff79eb32a
+ R10: ffffffffbcf59957 R11: 0000000000000001 R12: ffff888104068090
+ R13: ffffffffbc89f0a0 R14: ffffffffbc8a0f08 R15: 0000000000000078
+ FS: 0000000000000000(0000) GS:ffff88823c700000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000000 CR3: 00000001f6282001 CR4: 0000000000170ef0
+ Call Trace:
+ <TASK>
+ ? __warn+0xa5/0x200
+ ? event_trace_self_tests+0x1c2/0x480
+ ? report_bug+0x1f6/0x220
+ ? handle_bug+0x6f/0x90
+ ? exc_invalid_op+0x17/0x50
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? tracer_preempt_on+0x78/0x1c0
+ ? event_trace_self_tests+0x1c2/0x480
+ ? __pfx_event_trace_self_tests_init+0x10/0x10
+ event_trace_self_tests_init+0x27/0xe0
+ do_one_initcall+0xd6/0x3c0
+ ? __pfx_do_one_initcall+0x10/0x10
+ ? kasan_set_track+0x25/0x30
+ ? rcu_is_watching+0x38/0x60
+ kernel_init_freeable+0x324/0x450
+ ? __pfx_kernel_init+0x10/0x10
+ kernel_init+0x1f/0x1e0
+ ? _raw_spin_unlock_irq+0x33/0x50
+ ret_from_fork+0x34/0x60
+ ? __pfx_kernel_init+0x10/0x10
+ ret_from_fork_asm+0x1b/0x30
+ </TASK>
+
+This is because synth_event_gen_test_init() left the synthetic events that it
+created enabled. Having it disable them after testing lets the other selftests
+run fine.
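+
+As a sketch of the pairing (the disable side mirrors the calls added below;
+the enable side is shown only for illustration and assumes the test enables
+the event via the same helper):
+
+  /* Enable the synthetic event for the duration of the test ... */
+  trace_array_set_clr_event(gen_synth_test->tr, "synthetic",
+                            "gen_synth_test", true);
+
+  /* ... and return it to the expected (disabled) state when done. */
+  trace_array_set_clr_event(gen_synth_test->tr, "synthetic",
+                            "gen_synth_test", false);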
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231220111525.2f0f49b0@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 9fe41efaca084 ("tracing: Add synth event generation test module")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reported-by: Alexander Graf <graf@amazon.com>
+Tested-by: Alexander Graf <graf@amazon.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/synth_event_gen_test.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/kernel/trace/synth_event_gen_test.c
++++ b/kernel/trace/synth_event_gen_test.c
+@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_i
+
+ ret = test_trace_synth_event();
+ WARN_ON(ret);
++
++ /* Disable when done */
++ trace_array_set_clr_event(gen_synth_test->tr,
++ "synthetic",
++ "gen_synth_test", false);
++ trace_array_set_clr_event(empty_synth_test->tr,
++ "synthetic",
++ "empty_synth_test", false);
++ trace_array_set_clr_event(create_synth_test->tr,
++ "synthetic",
++ "create_synth_test", false);
+ out:
+ return ret;
+ }
--- /dev/null
+From 2dc4196138055eb0340231aecac4d78c2ec2bea5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 7 Dec 2023 20:49:26 +0100
+Subject: x86/alternatives: Disable interrupts and sync when optimizing NOPs in place
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2dc4196138055eb0340231aecac4d78c2ec2bea5 upstream.
+
+apply_alternatives() treats alternatives with the ALT_FLAG_NOT flag set
+specially: it optimizes the existing NOPs in place.
+
+Unfortunately, this happens with interrupts enabled and does not provide any
+form of core synchronization.
+
+So an interrupt hitting in the middle of the update and using the affected
+code path will observe a half-updated NOP and crash and burn. The following
+three-NOP sequence was observed to expose this crash fairly reliably under
+32-bit QEMU:
+
+ 0x90 0x90 0x90
+
+which is replaced by the optimized 3-byte NOP:
+
+ 0x8d 0x76 0x00
+
+So an interrupt can observe:
+
+ 1) 0x90 0x90 0x90 nop nop nop
+ 2) 0x8d 0x90 0x90 undefined
+ 3) 0x8d 0x76 0x90 lea -0x70(%esi),%esi
+ 4) 0x8d 0x76 0x00 lea 0x0(%esi),%esi
+
+Only #1 and #4 are true NOPs. The same problem obviously exists on 64-bit as
+well.
+
+Disable interrupts around this NOP optimization and invoke sync_core()
+before re-enabling them.
+
+Fixes: 270a69c4485d ("x86/alternative: Support relocations in alternatives")
+Reported-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/ZT6narvE%2BLxX%2B7Be@windriver.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index fd44739828f7..aae7456ece07 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -255,6 +255,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+ }
+ }
+
++static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ optimize_nops(instr, len);
++ sync_core();
++ local_irq_restore(flags);
++}
++
+ /*
+ * In this context, "source" is where the instructions are placed in the
+ * section .altinstr_replacement, for example during kernel build by the
+@@ -438,7 +448,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ * patch if feature is *NOT* present.
+ */
+ if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
+- optimize_nops(instr, a->instrlen);
++ optimize_nops_inplace(instr, a->instrlen);
+ continue;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 3ea1704a92967834bf0e64ca1205db4680d04048 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 7 Dec 2023 20:49:24 +0100
+Subject: x86/alternatives: Sync core before enabling interrupts
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 3ea1704a92967834bf0e64ca1205db4680d04048 upstream.
+
+text_poke_early() does:
+
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+ local_irq_restore(flags);
+ sync_core();
+
+That's not really correct because the synchronization should happen before
+interrupts are re-enabled to ensure that a pending interrupt observes the
+complete update of the opcodes.
+
+It's not entirely clear whether the interrupt entry provides enough
+serialization already, but moving the sync_core() invocation into the
+interrupt-disabled region does no harm and is obviously correct.
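+
+With the fix applied the sequence becomes:
+
+  local_irq_save(flags);
+  memcpy(addr, opcode, len);
+  sync_core();
+  local_irq_restore(flags);
+
+so a pending interrupt can only be delivered after the opcodes are fully
+updated and the core has been serialized.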
+
+Fixes: 6fffacb30349 ("x86/alternatives, jumplabel: Use text_poke_early() before mm_init()")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/ZT6narvE%2BLxX%2B7Be@windriver.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1685,8 +1685,8 @@ void __init_or_module text_poke_early(vo
+ } else {
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+ sync_core();
++ local_irq_restore(flags);
+
+ /*
+ * Could also do a CLFLUSH here to speed up CPU recovery; but
--- /dev/null
+From 69a7386c1ec25476a0c78ffeb59de08a2a08f495 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Dec 2023 09:58:58 +0100
+Subject: x86/smpboot/64: Handle X2APIC BIOS inconsistency gracefully
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 69a7386c1ec25476a0c78ffeb59de08a2a08f495 upstream.
+
+Chris reported that a Dell PowerEdge T340 system stopped booting when
+upgrading to a kernel that contains the parallel hotplug changes. Disabling
+parallel hotplug on the kernel command line makes it boot again.
+
+It turns out that the Dell BIOS has x2APIC enabled and the boot CPU comes up in
+X2APIC mode, but the APs come up inconsistently in xAPIC mode.
+
+Parallel hotplug requires that the upcoming CPU reads out its APIC ID from the
+local APIC in order to map it to the Linux CPU number.
+
+In this particular case the readout on the APs uses the MMIO mapped registers
+because the BIOS failed to enable x2APIC mode on them. That readout results in
+a page fault: because the BIOS enabled X2APIC mode on the boot CPU and the
+kernel switched to X2APIC mode early, the kernel never mapped the APIC MMIO
+space. That page fault can't be handled that early on the upcoming CPU and
+results in a silent boot failure.
+
+If parallel hotplug is disabled the system boots because in that case the
+APIC ID read is not required: the Linux CPU number is provided to the AP in
+the smpboot control word. When the kernel uses x2APIC mode the APs are
+switched to x2APIC mode as well, just slightly later in the bringup process,
+but there is no reason to do it that late.
+
+Cure the BIOS bogosity by checking in the parallel bootup path whether the
+kernel uses x2APIC mode and, if so, switching the APs over to x2APIC mode
+before the APIC ID readout.
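+
+For illustration only, a rough C rendering of what the new head_64.S code
+does (the read_apicid_msr()/read_apicid_mmio() helpers are hypothetical
+placeholders for the existing assembly paths, not real functions):
+
+  u32 eax, edx;
+
+  rdmsr(MSR_IA32_APICBASE, eax, edx);
+  if (eax & X2APIC_ENABLE)
+          return read_apicid_msr();
+
+  if (IS_ENABLED(CONFIG_X86_X2APIC) && x2apic_mode) {
+          /* Force the AP into X2APIC mode before reading the APIC ID. */
+          wrmsr(MSR_IA32_APICBASE, eax | X2APIC_ENABLE, edx);
+          return read_apicid_msr();
+  }
+
+  /* Otherwise fall back to the fix-mapped MMIO read. */
+  return read_apicid_mmio();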
+
+Fixes: 0c7ffa32dbd6 ("x86/smpboot/64: Implement arch_cpuhp_init_parallel_bringup() and enable it")
+Reported-by: Chris Lindee <chris.lindee@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Chris Lindee <chris.lindee@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/CA%2B2tU59853R49EaU_tyvOZuOTDdcU0RshGyydccp9R1NX9bEeQ@mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/head_64.S | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -256,6 +256,22 @@ SYM_INNER_LABEL(secondary_startup_64_no_
+ testl $X2APIC_ENABLE, %eax
+ jnz .Lread_apicid_msr
+
++#ifdef CONFIG_X86_X2APIC
++ /*
++ * If system is in X2APIC mode then MMIO base might not be
++ * mapped causing the MMIO read below to fault. Faults can't
++ * be handled at that point.
++ */
++ cmpl $0, x2apic_mode(%rip)
++ jz .Lread_apicid_mmio
++
++ /* Force the AP into X2APIC mode. */
++ orl $X2APIC_ENABLE, %eax
++ wrmsr
++ jmp .Lread_apicid_msr
++#endif
++
++.Lread_apicid_mmio:
+ /* Read the APIC ID from the fix-mapped MMIO space. */
+ movq apic_mmio_base(%rip), %rcx
+ addq $APIC_ID, %rcx