--- /dev/null
+From c7de2d9bb68a5fc71c25ff96705a80a76c8436eb Mon Sep 17 00:00:00 2001
+From: Edson Juliano Drosdeck <edson.drosdeck@gmail.com>
+Date: Thu, 1 Feb 2024 09:21:14 -0300
+Subject: ALSA: hda/realtek: Enable headset mic on Vaio VJFE-ADL
+
+From: Edson Juliano Drosdeck <edson.drosdeck@gmail.com>
+
+commit c7de2d9bb68a5fc71c25ff96705a80a76c8436eb upstream.
+
+Vaio VJFE-ADL is equipped with ALC269VC, and it needs the
+ALC298_FIXUP_SPK_VOLUME quirk to make its headset mic work.
+
+Signed-off-by: Edson Juliano Drosdeck <edson.drosdeck@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20240201122114.30080-1-edson.drosdeck@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8491,6 +8491,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
++ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
--- /dev/null
+From 97830f3c3088638ff90b20dfba2eb4d487bf14d7 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Wed, 31 Jan 2024 21:53:46 +0000
+Subject: binder: signal epoll threads of self-work
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 97830f3c3088638ff90b20dfba2eb4d487bf14d7 upstream.
+
+In (e)poll mode, threads often depend on I/O events to determine when
+data is ready for consumption. Within binder, a thread may initiate a
+command via BINDER_WRITE_READ without a read buffer and then make use
+of epoll_wait() or similar to consume any responses afterwards.
+
+It is then crucial that epoll threads are signaled via wakeup when they
+queue their own work. Otherwise, they risk waiting indefinitely for an
+event, leaving their work unhandled. What is worse, subsequent commands
+won't trigger a wakeup either, as the thread already has pending work.
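+
+As a rough illustration (not part of this patch), the user-space pattern
+described above looks roughly like the hypothetical sketch below; the
+ioctl and struct names follow the uapi in linux/android/binder.h, while
+the helper name is made up:
+
+  #include <stdint.h>
+  #include <string.h>
+  #include <sys/epoll.h>
+  #include <sys/ioctl.h>
+  #include <linux/android/binder.h>
+
+  /* Send commands with no read buffer, then rely on epoll to learn
+   * when there is work or replies to consume. */
+  static int binder_send_then_poll(int binder_fd, void *cmds, size_t len)
+  {
+          struct binder_write_read bwr;
+          struct epoll_event ev = { .events = EPOLLIN };
+          int epfd = epoll_create1(0);
+
+          memset(&bwr, 0, sizeof(bwr));
+          bwr.write_buffer = (uintptr_t)cmds;
+          bwr.write_size = len;
+          /* read_size stays 0: replies are consumed later */
+          if (epfd < 0 || ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
+                  return -1;
+
+          epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
+          /* Without the wakeup added below, this could block forever
+           * when the thread has queued work for itself. */
+          return epoll_wait(epfd, &ev, 1, -1);
+  }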
+
+Fixes: 457b9a6f09f0 ("Staging: android: add binder driver")
+Cc: Arve Hjønnevåg <arve@android.com>
+Cc: Martijn Coenen <maco@android.com>
+Cc: Alice Ryhl <aliceryhl@google.com>
+Cc: Steven Moreland <smoreland@google.com>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Link: https://lore.kernel.org/r/20240131215347.1808751-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -840,6 +840,16 @@ binder_enqueue_thread_work_ilocked(struc
+ {
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
+ binder_enqueue_work_ilocked(work, &thread->todo);
++
++ /* (e)poll-based threads require an explicit wakeup signal when
++ * queuing their own work; they rely on these events to consume
++ * messages without I/O block. Without it, threads risk waiting
++ * indefinitely without handling the work.
++ */
++ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
++ thread->pid == current->pid && !thread->process_todo)
++ wake_up_interruptible_sync(&thread->wait);
++
+ thread->process_todo = true;
+ }
+
--- /dev/null
+From 55583e899a5357308274601364741a83e78d6ac4 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Thu, 4 Jan 2024 22:20:33 +0800
+Subject: ext4: fix double-free of blocks due to wrong extents moved_len
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 55583e899a5357308274601364741a83e78d6ac4 upstream.
+
+In ext4_move_extents(), moved_len is only updated once all moves have
+been executed successfully, and the orig_inode and donor_inode
+preallocations are only discarded when moved_len is not zero. When the
+loop exits on a failure after successfully moving some extents,
+moved_len is not updated and remains at 0, so the preallocations are
+not discarded.
+
+If the moved extents overlap with the preallocated extents, the
+overlapped extents are freed twice in ext4_mb_release_inode_pa() and
+ext4_process_freed_data() (as described in commit 94d7c16cbbbd ("ext4:
+Fix double-free of blocks with EXT4_IOC_MOVE_EXT")), and bb_free is
+incremented twice. Hence when trim is executed, a zero-division bug is
+triggered in mb_update_avg_fragment_size() because bb_free is not zero
+and bb_fragments is zero.
+
+Therefore, update moved_len after each extent move to avoid the issue.
+
+Reported-by: Wei Chen <harperchen1110@gmail.com>
+Reported-by: xingwei lee <xrivendell7@gmail.com>
+Closes: https://lore.kernel.org/r/CAO4mrferzqBUnCag8R3m2zf897ts9UEuhjFQGPtODT92rYyR2Q@mail.gmail.com
+Fixes: fcf6b1b729bc ("ext4: refactor ext4_move_extents code base")
+CC: <stable@vger.kernel.org> # 3.18
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20240104142040.2835097-2-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/move_extent.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -615,6 +615,7 @@ ext4_move_extents(struct file *o_filp, s
+ goto out;
+ o_end = o_start + len;
+
++ *moved_len = 0;
+ while (o_start < o_end) {
+ struct ext4_extent *ex;
+ ext4_lblk_t cur_blk, next_blk;
+@@ -670,7 +671,7 @@ ext4_move_extents(struct file *o_filp, s
+ */
+ ext4_double_up_write_data_sem(orig_inode, donor_inode);
+ /* Swap original branches with new branches */
+- move_extent_per_page(o_filp, donor_inode,
++ *moved_len += move_extent_per_page(o_filp, donor_inode,
+ orig_page_index, donor_page_index,
+ offset_in_page, cur_len,
+ unwritten, &ret);
+@@ -680,9 +681,6 @@ ext4_move_extents(struct file *o_filp, s
+ o_start += cur_len;
+ d_start += cur_len;
+ }
+- *moved_len = o_start - orig_blk;
+- if (*moved_len > len)
+- *moved_len = len;
+
+ out:
+ if (*moved_len) {
--- /dev/null
+From 5f9ab17394f831cb7986ec50900fa37507a127f1 Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Thu, 1 Feb 2024 20:53:18 +0900
+Subject: firewire: core: correct documentation of fw_csr_string() kernel API
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit 5f9ab17394f831cb7986ec50900fa37507a127f1 upstream.
+
+Contrary to its current description, the kernel API can accept all types
+of directory entries.
+
+This commit corrects the documentation.
+
+Cc: stable@vger.kernel.org
+Fixes: 3c2c58cb33b3 ("firewire: core: fw_csr_string addendum")
+Link: https://lore.kernel.org/r/20240130100409.30128-2-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firewire/core-device.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -100,10 +100,9 @@ static int textual_leaf_to_string(const
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+- * The string is taken from a minimal ASCII text descriptor leaf after
+- * the immediate entry with @key. The string is zero-terminated.
+- * An overlong string is silently truncated such that it and the
+- * zero byte fit into @size.
++ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
++ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
++ * and the zero byte fit into @size.
+ *
+ * Returns strlen(buf) or a negative error code.
+ */
--- /dev/null
+From 792595bab4925aa06532a14dd256db523eb4fa5e Mon Sep 17 00:00:00 2001
+From: "zhili.liu" <zhili.liu@ucas.com.cn>
+Date: Tue, 2 Jan 2024 09:07:11 +0800
+Subject: iio: magnetometer: rm3100: add boundary check for the value read from RM3100_REG_TMRC
+
+From: zhili.liu <zhili.liu@ucas.com.cn>
+
+commit 792595bab4925aa06532a14dd256db523eb4fa5e upstream.
+
+Recently, we encountered a kernel crash in rm3100_common_probe()
+caused by an out-of-bounds access of the rm3100_samp_rates array
+(because of underlying hardware failures). Add a boundary check to
+prevent the out-of-bounds access.
+
+Fixes: 121354b2eceb ("iio: magnetometer: Add driver support for PNI RM3100")
+Suggested-by: Zhouyi Zhou <zhouzhouyi@gmail.com>
+Signed-off-by: zhili.liu <zhili.liu@ucas.com.cn>
+Link: https://lore.kernel.org/r/1704157631-3814-1-git-send-email-zhouzhouyi@gmail.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/magnetometer/rm3100-core.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/magnetometer/rm3100-core.c
++++ b/drivers/iio/magnetometer/rm3100-core.c
+@@ -539,6 +539,7 @@ int rm3100_common_probe(struct device *d
+ struct rm3100_data *data;
+ unsigned int tmp;
+ int ret;
++ int samp_rate_index;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+@@ -598,9 +599,14 @@ int rm3100_common_probe(struct device *d
+ ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
+ if (ret < 0)
+ return ret;
++
++ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
++ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
++ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
++ return -EINVAL;
++ }
+ /* Initializing max wait time, which is double conversion time. */
+- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
+- * 2;
++ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
+
+ /* Cycle count values may not be what we want. */
+ if ((tmp - RM3100_TMRC_OFFSET) == 0)
--- /dev/null
+From e3a9ee963ad8ba677ca925149812c5932b49af69 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Mon, 12 Feb 2024 19:05:10 -0700
+Subject: kbuild: Fix changing ELF file type for output of gen_btf for big endian
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit e3a9ee963ad8ba677ca925149812c5932b49af69 upstream.
+
+Commit 90ceddcb4950 ("bpf: Support llvm-objcopy for vmlinux BTF")
+changed the ELF type of .btf.vmlinux.bin.o to ET_REL via dd, which works
+fine for little endian platforms:
+
+ 00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............|
+ -00000010 03 00 b7 00 01 00 00 00 00 00 00 80 00 80 ff ff |................|
+ +00000010 01 00 b7 00 01 00 00 00 00 00 00 80 00 80 ff ff |................|
+
+However, for big endian platforms, it changes the wrong byte, resulting
+in an invalid ELF file type, which ld.lld rejects:
+
+ 00000000 7f 45 4c 46 02 02 01 00 00 00 00 00 00 00 00 00 |.ELF............|
+ -00000010 00 03 00 16 00 00 00 01 00 00 00 00 00 10 00 00 |................|
+ +00000010 01 03 00 16 00 00 00 01 00 00 00 00 00 10 00 00 |................|
+
+ Type: <unknown>: 103
+
+ ld.lld: error: .btf.vmlinux.bin.o: unknown file type
+
+Fix this by updating the entire 16-bit e_type field rather than just a
+single byte, so that everything works correctly for all platforms and
+linkers.
+
+ 00000000 7f 45 4c 46 02 02 01 00 00 00 00 00 00 00 00 00 |.ELF............|
+ -00000010 00 03 00 16 00 00 00 01 00 00 00 00 00 10 00 00 |................|
+ +00000010 00 01 00 16 00 00 00 01 00 00 00 00 00 10 00 00 |................|
+
+ Type: REL (Relocatable file)
+
+While in the area, update the comment to mention that binutils 2.35+
+matches LLD's behavior of rejecting an ET_EXEC input, a change that
+occurred after the comment was added.
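+
+As a rough illustration (not part of this patch), e_type is the 16-bit
+field at offset 16 of the ELF header, so which of the two bytes must
+become 0x01 (ET_REL) depends on EI_DATA; a hypothetical C sketch of the
+same idea:
+
+  #include <elf.h>
+  #include <string.h>
+
+  /* Rewrite e_type to ET_REL while honouring the file's endianness,
+   * mirroring what the printf | dd pipeline in link-vmlinux.sh does. */
+  static void set_et_rel(unsigned char *ehdr)
+  {
+          const unsigned char le[2] = { 0x01, 0x00 };  /* '\1\0' */
+          const unsigned char be[2] = { 0x00, 0x01 };  /* '\0\1' */
+
+          if (ehdr[EI_DATA] == ELFDATA2MSB)            /* big endian */
+                  memcpy(&ehdr[16], be, sizeof(be));   /* offset 16 == e_type */
+          else
+                  memcpy(&ehdr[16], le, sizeof(le));
+  }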
+
+Cc: stable@vger.kernel.org
+Fixes: 90ceddcb4950 ("bpf: Support llvm-objcopy for vmlinux BTF")
+Link: https://github.com/llvm/llvm-project/pull/75643
+Suggested-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Fangrui Song <maskray@google.com>
+Reviewed-by: Nicolas Schier <nicolas@fjasle.eu>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Justin Stitt <justinstitt@google.com>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/link-vmlinux.sh | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -138,8 +138,13 @@ gen_btf()
+ ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
+ --strip-all ${1} ${2} 2>/dev/null
+ # Change e_type to ET_REL so that it can be used to link final vmlinux.
+- # Unlike GNU ld, lld does not allow an ET_EXEC input.
+- printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
++ # GNU ld 2.35+ and lld do not allow an ET_EXEC input.
++ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
++ et_rel='\0\1'
++ else
++ et_rel='\1\0'
++ fi
++ printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ }
+
+ # Create ${2} .o file with all symbols from the ${1} object file
--- /dev/null
+From a4e61de63e34860c36a71d1a364edba16fb6203b Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Mon, 8 Jan 2024 17:18:33 +0530
+Subject: misc: fastrpc: Mark all sessions as invalid in cb_remove
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit a4e61de63e34860c36a71d1a364edba16fb6203b upstream.
+
+In the remoteproc shutdown sequence, rpmsg_remove gets called, which
+depopulates all the child nodes that were created during rpmsg_probe.
+This results in a cb_remove call for all the context banks of the
+remoteproc. In the cb_remove function, session 0 is currently skipped,
+which is not correct as session 0 would never become available again.
+Add changes to mark session 0 as invalid too.
+
+Fixes: f6f9279f2bf0 ("misc: fastrpc: Add Qualcomm fastrpc basic driver model")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Link: https://lore.kernel.org/r/20240108114833.20480-1-quic_ekangupt@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1401,7 +1401,7 @@ static int fastrpc_cb_remove(struct plat
+ int i;
+
+ spin_lock_irqsave(&cctx->lock, flags);
+- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
++ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
+ if (cctx->session[i].sid == sess->sid) {
+ cctx->session[i].valid = false;
+ cctx->sesscount--;
--- /dev/null
+From bfb007aebe6bff451f7f3a4be19f4f286d0d5d9c Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Thu, 25 Jan 2024 12:53:09 +0300
+Subject: nfc: nci: free rx_data_reassembly skb on NCI device cleanup
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit bfb007aebe6bff451f7f3a4be19f4f286d0d5d9c upstream.
+
+rx_data_reassembly skb is stored during NCI data exchange for processing
+fragmented packets. It is dropped only when the last fragment is processed
+or when an NTF packet with NCI_OP_RF_DEACTIVATE_NTF opcode is received.
+However, the NCI device may be deallocated before that, which leads to
+an skb leak.
+
+Since, by design, the rx_data_reassembly skb is bound to the NCI device
+and nothing prevents the device from being freed before the skb is
+processed in some way and cleaned, free it on NCI device cleanup.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+6b7c68d9c21e4ee4251b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/lkml/000000000000f43987060043da7b@google.com/
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/nci/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1197,6 +1197,10 @@ void nci_free_device(struct nci_dev *nde
+ {
+ nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
++
++ /* drop partial rx data packet if present */
++ if (ndev->rx_data_reassembly)
++ kfree_skb(ndev->rx_data_reassembly);
+ kfree(ndev);
+ }
+ EXPORT_SYMBOL(nci_free_device);
--- /dev/null
+From 977fe773dcc7098d8eaf4ee6382cb51e13e784cb Mon Sep 17 00:00:00 2001
+From: Lee Duncan <lduncan@suse.com>
+Date: Fri, 9 Feb 2024 10:07:34 -0800
+Subject: scsi: Revert "scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock"
+
+From: Lee Duncan <lduncan@suse.com>
+
+commit 977fe773dcc7098d8eaf4ee6382cb51e13e784cb upstream.
+
+This reverts commit 1a1975551943f681772720f639ff42fbaa746212.
+
+This commit causes interrupts to be lost for FCoE devices, since it changed
+spin locks from "bh" to "irqsave".
+
+Instead, a work queue should be used, and will be addressed in a separate
+commit.
+
+Fixes: 1a1975551943 ("scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock")
+Signed-off-by: Lee Duncan <lduncan@suse.com>
+Link: https://lore.kernel.org/r/c578cdcd46b60470535c4c4a953e6a1feca0dffd.1707500786.git.lduncan@suse.com
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/fcoe/fcoe_ctlr.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -318,17 +318,16 @@ static void fcoe_ctlr_announce(struct fc
+ {
+ struct fcoe_fcf *sel;
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = NULL;
+ list_for_each_entry(fcf, &fip->fcfs, list)
+ fcf->flogi_sent = 0;
+
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ sel = fip->sel_fcf;
+
+ if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -698,7 +697,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr
+ {
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+- unsigned long flags;
+ u16 old_xid;
+ u8 op;
+ u8 mac[ETH_ALEN];
+@@ -732,11 +730,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr
+ op = FIP_DT_FLOGI;
+ if (fip->mode == FIP_MODE_VN2VN)
+ break;
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = skb;
+ fip->flogi_req_send = 1;
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ schedule_work(&fip->timer_work);
+ return -EINPROGRESS;
+ case ELS_FDISC:
+@@ -1713,11 +1711,10 @@ static int fcoe_ctlr_flogi_send_locked(s
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+ int error;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ fcf = fcoe_ctlr_select(fip);
+ if (!fcf || fcf->flogi_sent) {
+@@ -1728,7 +1725,7 @@ static int fcoe_ctlr_flogi_retry(struct
+ fcoe_ctlr_solicit(fip, NULL);
+ error = fcoe_ctlr_flogi_send_locked(fip);
+ }
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ mutex_unlock(&fip->ctlr_mutex);
+ return error;
+ }
+@@ -1745,9 +1742,8 @@ static int fcoe_ctlr_flogi_retry(struct
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ fcf = fip->sel_fcf;
+ if (!fcf || !fip->flogi_req_send)
+ goto unlock;
+@@ -1774,7 +1770,7 @@ static void fcoe_ctlr_flogi_send(struct
+ } else /* XXX */
+ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ }
+
+ /**
usb-f_mass_storage-forbid-async-queue-when-shutdown-happen.patch
i2c-i801-remove-i801_set_block_buffer_mode.patch
i2c-i801-fix-block-process-call-transactions.patch
+scsi-revert-scsi-fcoe-fix-potential-deadlock-on-fip-ctlr_lock.patch
+firewire-core-correct-documentation-of-fw_csr_string-kernel-api.patch
+kbuild-fix-changing-elf-file-type-for-output-of-gen_btf-for-big-endian.patch
+nfc-nci-free-rx_data_reassembly-skb-on-nci-device-cleanup.patch
+xen-netback-properly-sync-tx-responses.patch
+alsa-hda-realtek-enable-headset-mic-on-vaio-vjfe-adl.patch
+binder-signal-epoll-threads-of-self-work.patch
+misc-fastrpc-mark-all-sessions-as-invalid-in-cb_remove.patch
+ext4-fix-double-free-of-blocks-due-to-wrong-extents-moved_len.patch
+tracing-fix-wasted-memory-in-saved_cmdlines-logic.patch
+staging-iio-ad5933-fix-type-mismatch-regression.patch
+iio-magnetometer-rm3100-add-boundary-check-for-the-value-read-from-rm3100_reg_tmrc.patch
--- /dev/null
+From 6db053cd949fcd6254cea9f2cd5d39f7bd64379c Mon Sep 17 00:00:00 2001
+From: David Schiller <david.schiller@jku.at>
+Date: Mon, 22 Jan 2024 14:49:17 +0100
+Subject: staging: iio: ad5933: fix type mismatch regression
+
+From: David Schiller <david.schiller@jku.at>
+
+commit 6db053cd949fcd6254cea9f2cd5d39f7bd64379c upstream.
+
+Commit 4c3577db3e4f ("Staging: iio: impedance-analyzer: Fix sparse
+warning") fixed a compiler warning, but introduced a bug that resulted
+in one of the two 16 bit IIO channels always being zero (when both are
+enabled).
+
+This is because int is 32 bits wide on most architectures, and in the
+case of a little-endian machine the two most significant bytes would
+occupy the buffer for the second channel, as 'val' is being passed as a
+void pointer to 'iio_push_to_buffers()'.
+
+Fix by defining 'val' as u16. Tested working on ARM64.
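+
+A rough illustration (not part of this patch) of the layout problem on a
+little-endian machine: a consumer that expects two packed 16-bit samples
+at the start of the buffer only sees the first one when the buffer is
+really an int[2]:
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+          int wrong[2] = { 0x1111, 0x2222 };      /* bytes: 11 11 00 00 22 22 00 00 */
+          uint16_t right[2] = { 0x1111, 0x2222 }; /* bytes: 11 11 22 22 */
+          uint16_t sample[2];
+
+          memcpy(sample, wrong, sizeof(sample));
+          printf("int[2]: %#x %#x\n", sample[0], sample[1]);  /* 0x1111 0 */
+
+          memcpy(sample, right, sizeof(sample));
+          printf("u16[2]: %#x %#x\n", sample[0], sample[1]);  /* 0x1111 0x2222 */
+          return 0;
+  }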
+
+Fixes: 4c3577db3e4f ("Staging: iio: impedance-analyzer: Fix sparse warning")
+Signed-off-by: David Schiller <david.schiller@jku.at>
+Link: https://lore.kernel.org/r/20240122134916.2137957-1-david.schiller@jku.at
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/iio/impedance-analyzer/ad5933.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -624,7 +624,7 @@ static void ad5933_work(struct work_stru
+ struct ad5933_state, work.work);
+ struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ __be16 buf[2];
+- int val[2];
++ u16 val[2];
+ unsigned char status;
+ int ret;
+
--- /dev/null
+From 44dc5c41b5b1267d4dd037d26afc0c4d3a568acb Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Fri, 9 Feb 2024 06:36:22 -0500
+Subject: tracing: Fix wasted memory in saved_cmdlines logic
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 44dc5c41b5b1267d4dd037d26afc0c4d3a568acb upstream.
+
+While looking at improving the saved_cmdlines cache I found a huge amount
+of wasted memory that should be used for the cmdlines.
+
+The tracing data saves pids during the trace. At sched switch, if a trace
+occurred, it will save the comm of the task that did the trace. This is
+saved in a "cache" that maps pids to comms and exposed to user space via
+the /sys/kernel/tracing/saved_cmdlines file. Currently it only caches by
+default 128 comms.
+
+The structure that uses this creates an array to store the pids using
+PID_MAX_DEFAULT (which is usually set to 32768). This makes the structure
+131104 bytes in size on 64-bit machines.
+
+In hex: 131104 = 0x20020, and since the kernel allocates generic memory in
+powers of two, the kernel would allocate 0x40000 or 262144 bytes to store
+this structure. That leaves 131040 bytes of wasted space.
+
+Worse, the structure points to an allocated array to store the comm names,
+which is 16 bytes times the number of names to save (currently 128), i.e.
+2048 bytes. Instead of allocating a separate array, make the structure
+end with a variable-length string and use the extra space for that.
+
+This is similar to a recommendation that Linus had made about eventfs_inode names:
+
+ https://lore.kernel.org/all/20240130190355.11486-5-torvalds@linux-foundation.org/
+
+Instead of allocating a separate string array to hold the saved comms,
+have the structure end with: char saved_cmdlines[]; and round the
+allocation up to the next power of two over
+sizeof(struct saved_cmdlines_buffer) + num_cmdlines * TASK_COMM_LEN.
+The extra space at the end is then used for the saved_cmdlines portion.
+
+Now, instead of saving only 128 comms by default, by using this wasted
+space at the end of the structure it can save over 8000 comms and even
+saves space by removing the need for allocating the other array.
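+
+A small worked example (illustrative only, not part of the patch; it
+assumes the 131104-byte figure above, a TASK_COMM_LEN of 16 and the
+usual power-of-two rounding of the allocator) showing where the wasted
+bytes and the "over 8000 comms" figure come from:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int old_size = 131104;  /* 0x20020, size of the old struct */
+          unsigned int comm_len = 16;      /* TASK_COMM_LEN */
+          unsigned int alloc = 1;
+
+          while (alloc < old_size)         /* round up to a power of two */
+                  alloc <<= 1;             /* 0x40000 == 262144 */
+
+          printf("wasted bytes: %u\n", alloc - old_size);   /* 131040 */
+          printf("comms that fit in the slack: %u\n",
+                 (alloc - old_size) / comm_len);            /* 8190 */
+          return 0;
+  }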
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240209063622.1f7b6d5f@rorschach.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Vincent Donnefort <vdonnefort@google.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Mete Durlu <meted@linux.ibm.com>
+Fixes: 939c7a4f04fcd ("tracing: Introduce saved_cmdlines_size file")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 75 +++++++++++++++++++++++++--------------------------
+ 1 file changed, 37 insertions(+), 38 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1977,7 +1977,7 @@ struct saved_cmdlines_buffer {
+ unsigned *map_cmdline_to_pid;
+ unsigned cmdline_num;
+ int cmdline_idx;
+- char *saved_cmdlines;
++ char saved_cmdlines[];
+ };
+ static struct saved_cmdlines_buffer *savedcmd;
+
+@@ -1991,47 +1991,58 @@ static inline void set_cmdline(int idx,
+ strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+ }
+
+-static int allocate_cmdlines_buffer(unsigned int val,
+- struct saved_cmdlines_buffer *s)
++static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+ {
++ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
++
++ kfree(s->map_cmdline_to_pid);
++ free_pages((unsigned long)s, order);
++}
++
++static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
++{
++ struct saved_cmdlines_buffer *s;
++ struct page *page;
++ int orig_size, size;
++ int order;
++
++ /* Figure out how much is needed to hold the given number of cmdlines */
++ orig_size = sizeof(*s) + val * TASK_COMM_LEN;
++ order = get_order(orig_size);
++ size = 1 << (order + PAGE_SHIFT);
++ page = alloc_pages(GFP_KERNEL, order);
++ if (!page)
++ return NULL;
++
++ s = page_address(page);
++ memset(s, 0, sizeof(*s));
++
++ /* Round up to actual allocation */
++ val = (size - sizeof(*s)) / TASK_COMM_LEN;
++ s->cmdline_num = val;
++
+ s->map_cmdline_to_pid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_pid),
+ GFP_KERNEL);
+- if (!s->map_cmdline_to_pid)
+- return -ENOMEM;
+-
+- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
+- if (!s->saved_cmdlines) {
+- kfree(s->map_cmdline_to_pid);
+- return -ENOMEM;
++ if (!s->map_cmdline_to_pid) {
++ free_saved_cmdlines_buffer(s);
++ return NULL;
+ }
+
+ s->cmdline_idx = 0;
+- s->cmdline_num = val;
+ memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+ sizeof(s->map_pid_to_cmdline));
+ memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+ val * sizeof(*s->map_cmdline_to_pid));
+
+- return 0;
++ return s;
+ }
+
+ static int trace_create_savedcmd(void)
+ {
+- int ret;
+-
+- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
+- if (!savedcmd)
+- return -ENOMEM;
++ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
+
+- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+- if (ret < 0) {
+- kfree(savedcmd);
+- savedcmd = NULL;
+- return -ENOMEM;
+- }
+-
+- return 0;
++ return savedcmd ? 0 : -ENOMEM;
+ }
+
+ int is_tracing_stopped(void)
+@@ -5283,26 +5294,14 @@ tracing_saved_cmdlines_size_read(struct
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ }
+
+-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+-{
+- kfree(s->saved_cmdlines);
+- kfree(s->map_cmdline_to_pid);
+- kfree(s);
+-}
+-
+ static int tracing_resize_saved_cmdlines(unsigned int val)
+ {
+ struct saved_cmdlines_buffer *s, *savedcmd_temp;
+
+- s = kmalloc(sizeof(*s), GFP_KERNEL);
++ s = allocate_cmdlines_buffer(val);
+ if (!s)
+ return -ENOMEM;
+
+- if (allocate_cmdlines_buffer(val, s) < 0) {
+- kfree(s);
+- return -ENOMEM;
+- }
+-
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+ savedcmd_temp = savedcmd;
--- /dev/null
+From 7b55984c96ffe9e236eb9c82a2196e0b1f84990d Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Mon, 29 Jan 2024 14:03:08 +0100
+Subject: xen-netback: properly sync TX responses
+
+From: Jan Beulich <jbeulich@suse.com>
+
+commit 7b55984c96ffe9e236eb9c82a2196e0b1f84990d upstream.
+
+Invoking the make_tx_response() / push_tx_responses() pair with no lock
+held would be acceptable only if all such invocations happened from the
+same context (NAPI instance or dealloc thread). Since this isn't the
+case, and since the interface "spec" also doesn't demand that multicast
+operations may only be performed with no in-flight transmits,
+MCAST_{ADD,DEL} processing also needs to acquire the response lock
+around the invocations.
+
+To prevent similar mistakes going forward, "downgrade" the present
+functions to private helpers of just the two remaining ones using them
+directly, with no forward declarations anymore. This involves renaming
+what so far was make_tx_response(), for the new function of that name
+to serve the new (wrapper) purpose.
+
+While there,
+- constify the txp parameters,
+- correct xenvif_idx_release()'s status parameter's type,
+- rename {,_}make_tx_response()'s status parameters for consistency with
+ xenvif_idx_release()'s.
+
+Fixes: 210c34dcd8d9 ("xen-netback: add support for multicast control")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Link: https://lore.kernel.org/r/980c6c3d-e10e-4459-8565-e8fbde122f00@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c | 84 ++++++++++++++++++--------------------
+ 1 file changed, 40 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -97,13 +97,12 @@ module_param_named(hash_cache_size, xenv
+ MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
+
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status);
++ s8 status);
+
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st);
+-static void push_tx_responses(struct xenvif_queue *queue);
++ s8 status);
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
+@@ -201,13 +200,9 @@ static void xenvif_tx_err(struct xenvif_
+ unsigned int extra_count, RING_IDX end)
+ {
+ RING_IDX cons = queue->tx.req_cons;
+- unsigned long flags;
+
+ do {
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (cons == end)
+ break;
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
+@@ -458,12 +453,7 @@ static void xenvif_get_requests(struct x
+ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock, flags);
+ ++txp;
+ continue;
+ }
+@@ -489,14 +479,8 @@ static void xenvif_get_requests(struct x
+
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock,
+- flags);
+ continue;
+ }
+
+@@ -990,7 +974,6 @@ static void xenvif_tx_build_gops(struct
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -1002,7 +985,6 @@ static void xenvif_tx_build_gops(struct
+
+ make_tx_response(queue, &txreq, extra_count,
+ XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -1437,8 +1419,35 @@ int xenvif_tx_action(struct xenvif_queue
+ return work_done;
+ }
+
++static void _make_tx_response(struct xenvif_queue *queue,
++ const struct xen_netif_tx_request *txp,
++ unsigned int extra_count,
++ s8 status)
++{
++ RING_IDX i = queue->tx.rsp_prod_pvt;
++ struct xen_netif_tx_response *resp;
++
++ resp = RING_GET_RESPONSE(&queue->tx, i);
++ resp->id = txp->id;
++ resp->status = status;
++
++ while (extra_count-- != 0)
++ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++
++ queue->tx.rsp_prod_pvt = ++i;
++}
++
++static void push_tx_responses(struct xenvif_queue *queue)
++{
++ int notify;
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
++ if (notify)
++ notify_remote_via_irq(queue->tx_irq);
++}
++
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status)
++ s8 status)
+ {
+ struct pending_tx_info *pending_tx_info;
+ pending_ring_idx_t index;
+@@ -1448,8 +1457,8 @@ static void xenvif_idx_release(struct xe
+
+ spin_lock_irqsave(&queue->response_lock, flags);
+
+- make_tx_response(queue, &pending_tx_info->req,
+- pending_tx_info->extra_count, status);
++ _make_tx_response(queue, &pending_tx_info->req,
++ pending_tx_info->extra_count, status);
+
+ /* Release the pending index before pusing the Tx response so
+ * its available before a new Tx request is pushed by the
+@@ -1463,32 +1472,19 @@ static void xenvif_idx_release(struct xe
+ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+-
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st)
++ s8 status)
+ {
+- RING_IDX i = queue->tx.rsp_prod_pvt;
+- struct xen_netif_tx_response *resp;
+-
+- resp = RING_GET_RESPONSE(&queue->tx, i);
+- resp->id = txp->id;
+- resp->status = st;
+-
+- while (extra_count-- != 0)
+- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++ unsigned long flags;
+
+- queue->tx.rsp_prod_pvt = ++i;
+-}
++ spin_lock_irqsave(&queue->response_lock, flags);
+
+-static void push_tx_responses(struct xenvif_queue *queue)
+-{
+- int notify;
++ _make_tx_response(queue, txp, extra_count, status);
++ push_tx_responses(queue);
+
+- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+- if (notify)
+- notify_remote_via_irq(queue->tx_irq);
++ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)