--- /dev/null
+From 97830f3c3088638ff90b20dfba2eb4d487bf14d7 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Wed, 31 Jan 2024 21:53:46 +0000
+Subject: binder: signal epoll threads of self-work
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 97830f3c3088638ff90b20dfba2eb4d487bf14d7 upstream.
+
+In (e)poll mode, threads often depend on I/O events to determine when
+data is ready for consumption. Within binder, a thread may initiate a
+command via BINDER_WRITE_READ without a read buffer and then make use
+of epoll_wait() or similar to consume any responses afterwards.
+
+It is then crucial that epoll threads are signaled via wakeup when they
+queue their own work. Otherwise, they risk waiting indefinitely for an
+event, leaving their work unhandled. What is worse, subsequent commands
+won't trigger a wakeup either as the thread has pending work.
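+
+The pattern in question looks roughly like the following userspace sketch
+(illustrative only, not part of this patch; it assumes the usual uapi
+definitions from <linux/android/binder.h> and <sys/epoll.h>, and that
+binder_fd was already registered with the epoll instance via epoll_ctl()):
+
+    struct binder_write_read bwr = {
+        .write_buffer = (binder_uintptr_t)cmds, /* some BC_* commands */
+        .write_size = cmds_size,
+        .read_size = 0,                         /* no read buffer supplied */
+    };
+    struct epoll_event ev;
+
+    ioctl(binder_fd, BINDER_WRITE_READ, &bwr);  /* may queue work to self */
+
+    /* Without the wakeup added below, this can block forever even though
+     * the thread's own todo list is non-empty.
+     */
+    epoll_wait(epoll_fd, &ev, 1, -1);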
+
+Fixes: 457b9a6f09f0 ("Staging: android: add binder driver")
+Cc: Arve Hjønnevåg <arve@android.com>
+Cc: Martijn Coenen <maco@android.com>
+Cc: Alice Ryhl <aliceryhl@google.com>
+Cc: Steven Moreland <smoreland@google.com>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Link: https://lore.kernel.org/r/20240131215347.1808751-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -846,6 +846,16 @@ binder_enqueue_thread_work_ilocked(struc
+ {
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
+ binder_enqueue_work_ilocked(work, &thread->todo);
++
++ /* (e)poll-based threads require an explicit wakeup signal when
++ * queuing their own work; they rely on these events to consume
++ * messages without I/O block. Without it, threads risk waiting
++ * indefinitely without handling the work.
++ */
++ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
++ thread->pid == current->pid && !thread->process_todo)
++ wake_up_interruptible_sync(&thread->wait);
++
+ thread->process_todo = true;
+ }
+
--- /dev/null
+From 55583e899a5357308274601364741a83e78d6ac4 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Thu, 4 Jan 2024 22:20:33 +0800
+Subject: ext4: fix double-free of blocks due to wrong extents moved_len
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 55583e899a5357308274601364741a83e78d6ac4 upstream.
+
+In ext4_move_extents(), moved_len is only updated once all moves have
+been successfully executed, and the orig_inode and donor_inode
+preallocations are only discarded when moved_len is not zero. When the
+loop exits on a failure after successfully moving some extents, moved_len
+has not been updated and remains 0, so the preallocations are not discarded.
+
+If the moved extents overlap with the preallocated extents, the
+overlapped extents are freed twice in ext4_mb_release_inode_pa() and
+ext4_process_freed_data() (as described in commit 94d7c16cbbbd ("ext4:
+Fix double-free of blocks with EXT4_IOC_MOVE_EXT")), and bb_free is
+incremented twice. Hence when trim is executed, a zero-division bug is
+triggered in mb_update_avg_fragment_size() because bb_free is not zero
+and bb_fragments is zero.
+
+Therefore, update moved_len after each extent move to avoid the issue.
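+
+For context, moved_len is what EXT4_IOC_MOVE_EXT reports back to userspace
+(e.g. to e4defrag); a hedged sketch of the caller side, assuming the usual
+struct move_extent layout:
+
+    struct move_extent me = {
+        .donor_fd = donor_fd,
+        .orig_start = 0,
+        .donor_start = 0,
+        .len = nr_blocks,
+    };
+
+    /* Even if the move fails part way through, me.moved_len must reflect
+     * the extents that really were swapped so that the preallocations of
+     * both inodes get discarded.
+     */
+    ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);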
+
+Reported-by: Wei Chen <harperchen1110@gmail.com>
+Reported-by: xingwei lee <xrivendell7@gmail.com>
+Closes: https://lore.kernel.org/r/CAO4mrferzqBUnCag8R3m2zf897ts9UEuhjFQGPtODT92rYyR2Q@mail.gmail.com
+Fixes: fcf6b1b729bc ("ext4: refactor ext4_move_extents code base")
+CC: <stable@vger.kernel.org> # 3.18
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20240104142040.2835097-2-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/move_extent.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -616,6 +616,7 @@ ext4_move_extents(struct file *o_filp, s
+ goto out;
+ o_end = o_start + len;
+
++ *moved_len = 0;
+ while (o_start < o_end) {
+ struct ext4_extent *ex;
+ ext4_lblk_t cur_blk, next_blk;
+@@ -671,7 +672,7 @@ ext4_move_extents(struct file *o_filp, s
+ */
+ ext4_double_up_write_data_sem(orig_inode, donor_inode);
+ /* Swap original branches with new branches */
+- move_extent_per_page(o_filp, donor_inode,
++ *moved_len += move_extent_per_page(o_filp, donor_inode,
+ orig_page_index, donor_page_index,
+ offset_in_page, cur_len,
+ unwritten, &ret);
+@@ -681,9 +682,6 @@ ext4_move_extents(struct file *o_filp, s
+ o_start += cur_len;
+ d_start += cur_len;
+ }
+- *moved_len = o_start - orig_blk;
+- if (*moved_len > len)
+- *moved_len = len;
+
+ out:
+ if (*moved_len) {
--- /dev/null
+From 5f9ab17394f831cb7986ec50900fa37507a127f1 Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Thu, 1 Feb 2024 20:53:18 +0900
+Subject: firewire: core: correct documentation of fw_csr_string() kernel API
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit 5f9ab17394f831cb7986ec50900fa37507a127f1 upstream.
+
+Contrary to its current description, the kernel API can accept all types
+of directory entries.
+
+This commit corrects the documentation.
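+
+For reference, a typical call looks roughly like this (hedged sketch,
+assuming a unit directory pointer and the CSR_* key constants from
+<linux/firewire-constants.h>, as used by existing drivers):
+
+    char model[32];
+    int ret;
+
+    /* works for any kind of directory entry carrying @key, not only
+     * immediate entries, as the corrected documentation now states
+     */
+    ret = fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model));
+    if (ret < 0)
+        return ret;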
+
+Cc: stable@vger.kernel.org
+Fixes: 3c2c58cb33b3 ("firewire: core: fw_csr_string addendum")
+Link: https://lore.kernel.org/r/20240130100409.30128-2-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firewire/core-device.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -113,10 +113,9 @@ static int textual_leaf_to_string(const
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+- * The string is taken from a minimal ASCII text descriptor leaf after
+- * the immediate entry with @key. The string is zero-terminated.
+- * An overlong string is silently truncated such that it and the
+- * zero byte fit into @size.
++ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
++ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
++ * and the zero byte fit into @size.
+ *
+ * Returns strlen(buf) or a negative error code.
+ */
--- /dev/null
+From bfb007aebe6bff451f7f3a4be19f4f286d0d5d9c Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Thu, 25 Jan 2024 12:53:09 +0300
+Subject: nfc: nci: free rx_data_reassembly skb on NCI device cleanup
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit bfb007aebe6bff451f7f3a4be19f4f286d0d5d9c upstream.
+
+rx_data_reassembly skb is stored during NCI data exchange for processing
+fragmented packets. It is dropped only when the last fragment is processed
+or when an NTF packet with NCI_OP_RF_DEACTIVATE_NTF opcode is received.
+However, the NCI device may be deallocated before that which leads to skb
+leak.
+
+Since the rx_data_reassembly skb is by design bound to the NCI device and
+nothing prevents the device from being freed before the skb is processed
+and cleaned up, free the skb on NCI device cleanup.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+6b7c68d9c21e4ee4251b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/lkml/000000000000f43987060043da7b@google.com/
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/nci/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1209,6 +1209,10 @@ void nci_free_device(struct nci_dev *nde
+ {
+ nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
++
++ /* drop partial rx data packet if present */
++ if (ndev->rx_data_reassembly)
++ kfree_skb(ndev->rx_data_reassembly);
+ kfree(ndev);
+ }
+ EXPORT_SYMBOL(nci_free_device);
--- /dev/null
+From 977fe773dcc7098d8eaf4ee6382cb51e13e784cb Mon Sep 17 00:00:00 2001
+From: Lee Duncan <lduncan@suse.com>
+Date: Fri, 9 Feb 2024 10:07:34 -0800
+Subject: scsi: Revert "scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock"
+
+From: Lee Duncan <lduncan@suse.com>
+
+commit 977fe773dcc7098d8eaf4ee6382cb51e13e784cb upstream.
+
+This reverts commit 1a1975551943f681772720f639ff42fbaa746212.
+
+The reverted commit causes interrupts to be lost for FCoE devices, since
+it changed spin locks from "bh" to "irqsave".
+
+Instead, a work queue should be used, and will be addressed in a separate
+commit.
+
+Fixes: 1a1975551943 ("scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock")
+Signed-off-by: Lee Duncan <lduncan@suse.com>
+Link: https://lore.kernel.org/r/c578cdcd46b60470535c4c4a953e6a1feca0dffd.1707500786.git.lduncan@suse.com
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/fcoe/fcoe_ctlr.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -330,17 +330,16 @@ static void fcoe_ctlr_announce(struct fc
+ {
+ struct fcoe_fcf *sel;
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = NULL;
+ list_for_each_entry(fcf, &fip->fcfs, list)
+ fcf->flogi_sent = 0;
+
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ sel = fip->sel_fcf;
+
+ if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -710,7 +709,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr
+ {
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+- unsigned long flags;
+ u16 old_xid;
+ u8 op;
+ u8 mac[ETH_ALEN];
+@@ -744,11 +742,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr
+ op = FIP_DT_FLOGI;
+ if (fip->mode == FIP_MODE_VN2VN)
+ break;
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = skb;
+ fip->flogi_req_send = 1;
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ schedule_work(&fip->timer_work);
+ return -EINPROGRESS;
+ case ELS_FDISC:
+@@ -1725,11 +1723,10 @@ static int fcoe_ctlr_flogi_send_locked(s
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+ int error;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ fcf = fcoe_ctlr_select(fip);
+ if (!fcf || fcf->flogi_sent) {
+@@ -1740,7 +1737,7 @@ static int fcoe_ctlr_flogi_retry(struct
+ fcoe_ctlr_solicit(fip, NULL);
+ error = fcoe_ctlr_flogi_send_locked(fip);
+ }
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ mutex_unlock(&fip->ctlr_mutex);
+ return error;
+ }
+@@ -1757,9 +1754,8 @@ static int fcoe_ctlr_flogi_retry(struct
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ fcf = fip->sel_fcf;
+ if (!fcf || !fip->flogi_req_send)
+ goto unlock;
+@@ -1786,7 +1782,7 @@ static void fcoe_ctlr_flogi_send(struct
+ } else /* XXX */
+ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ }
+
+ /**
hid-wacom-do-not-register-input-devices-until-after-hid_hw_start.patch
usb-hub-check-for-alternate-port-before-enabling-a_alt_hnp_support.patch
usb-f_mass_storage-forbid-async-queue-when-shutdown-happen.patch
+scsi-revert-scsi-fcoe-fix-potential-deadlock-on-fip-ctlr_lock.patch
+firewire-core-correct-documentation-of-fw_csr_string-kernel-api.patch
+nfc-nci-free-rx_data_reassembly-skb-on-nci-device-cleanup.patch
+xen-netback-properly-sync-tx-responses.patch
+binder-signal-epoll-threads-of-self-work.patch
+ext4-fix-double-free-of-blocks-due-to-wrong-extents-moved_len.patch
+staging-iio-ad5933-fix-type-mismatch-regression.patch
--- /dev/null
+From 6db053cd949fcd6254cea9f2cd5d39f7bd64379c Mon Sep 17 00:00:00 2001
+From: David Schiller <david.schiller@jku.at>
+Date: Mon, 22 Jan 2024 14:49:17 +0100
+Subject: staging: iio: ad5933: fix type mismatch regression
+
+From: David Schiller <david.schiller@jku.at>
+
+commit 6db053cd949fcd6254cea9f2cd5d39f7bd64379c upstream.
+
+Commit 4c3577db3e4f ("Staging: iio: impedance-analyzer: Fix sparse
+warning") fixed a compiler warning, but introduced a bug that resulted
+in one of the two 16 bit IIO channels always being zero (when both are
+enabled).
+
+This is because int is 32 bits wide on most architectures, and on a
+little-endian machine the two most significant (zero) bytes of the first
+element then occupy the buffer space of the second channel, as 'val' is
+passed as a void pointer to 'iio_push_to_buffers()'.
+
+Fix by defining 'val' as u16. Tested working on ARM64.
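+
+To illustrate the layout problem (hedged sketch, little-endian machine,
+both 16-bit channels enabled, so the core expects two packed 16-bit
+samples per scan):
+
+    int val[2] = { 0x1234, 0x5678 };    /* before the fix */
+    /* in memory: 34 12 00 00 78 56 00 00
+     * iio_push_to_buffers() copies the scan as packed 16-bit samples, so
+     * channel 0 reads 0x1234 but channel 1 reads the 0x0000 padding.
+     */
+
+    u16 val[2] = { 0x1234, 0x5678 };    /* after the fix */
+    /* in memory: 34 12 78 56 - both channels are read back correctly */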
+
+Fixes: 4c3577db3e4f ("Staging: iio: impedance-analyzer: Fix sparse warning")
+Signed-off-by: David Schiller <david.schiller@jku.at>
+Link: https://lore.kernel.org/r/20240122134916.2137957-1-david.schiller@jku.at
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/iio/impedance-analyzer/ad5933.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -638,7 +638,7 @@ static void ad5933_work(struct work_stru
+ struct ad5933_state, work.work);
+ struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ __be16 buf[2];
+- int val[2];
++ u16 val[2];
+ unsigned char status;
+ int ret;
+
--- /dev/null
+From 7b55984c96ffe9e236eb9c82a2196e0b1f84990d Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Mon, 29 Jan 2024 14:03:08 +0100
+Subject: xen-netback: properly sync TX responses
+
+From: Jan Beulich <jbeulich@suse.com>
+
+commit 7b55984c96ffe9e236eb9c82a2196e0b1f84990d upstream.
+
+Invoking the make_tx_response() / push_tx_responses() pair with no lock
+held would be acceptable only if all such invocations happened from the
+same context (NAPI instance or dealloc thread). Since this isn't the
+case, and since the interface "spec" also doesn't demand that multicast
+operations may only be performed with no in-flight transmits,
+MCAST_{ADD,DEL} processing also needs to acquire the response lock
+around the invocations.
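+
+For illustration, the response path that makes unlocked invocation racy
+(sketch simplified from the code below):
+
+    RING_IDX i = queue->tx.rsp_prod_pvt;    /* two contexts can read the
+                                             * same slot index here ...
+                                             */
+    resp = RING_GET_RESPONSE(&queue->tx, i);
+    resp->id = txp->id;
+    resp->status = status;
+    queue->tx.rsp_prod_pvt = ++i;           /* ... and then one response
+                                             * is silently overwritten
+                                             */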
+
+To prevent similar mistakes going forward, "downgrade" the present
+functions to private helpers of just the two remaining direct users,
+with no forward declarations anymore. This involves renaming what was
+so far make_tx_response(), so that the new function of that name can
+serve the new (wrapper) purpose.
+
+While there,
+- constify the txp parameters,
+- correct xenvif_idx_release()'s status parameter's type,
+- rename {,_}make_tx_response()'s status parameters for consistency with
+ xenvif_idx_release()'s.
+
+Fixes: 210c34dcd8d9 ("xen-netback: add support for multicast control")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Link: https://lore.kernel.org/r/980c6c3d-e10e-4459-8565-e8fbde122f00@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c | 84 ++++++++++++++++++--------------------
+ 1 file changed, 40 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -97,13 +97,12 @@ module_param_named(hash_cache_size, xenv
+ MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
+
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status);
++ s8 status);
+
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st);
+-static void push_tx_responses(struct xenvif_queue *queue);
++ s8 status);
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
+@@ -201,13 +200,9 @@ static void xenvif_tx_err(struct xenvif_
+ unsigned int extra_count, RING_IDX end)
+ {
+ RING_IDX cons = queue->tx.req_cons;
+- unsigned long flags;
+
+ do {
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (cons == end)
+ break;
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
+@@ -458,12 +453,7 @@ static void xenvif_get_requests(struct x
+ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock, flags);
+ ++txp;
+ continue;
+ }
+@@ -489,14 +479,8 @@ static void xenvif_get_requests(struct x
+
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock,
+- flags);
+ continue;
+ }
+
+@@ -990,7 +974,6 @@ static void xenvif_tx_build_gops(struct
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -1002,7 +985,6 @@ static void xenvif_tx_build_gops(struct
+
+ make_tx_response(queue, &txreq, extra_count,
+ XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -1428,8 +1410,35 @@ int xenvif_tx_action(struct xenvif_queue
+ return work_done;
+ }
+
++static void _make_tx_response(struct xenvif_queue *queue,
++ const struct xen_netif_tx_request *txp,
++ unsigned int extra_count,
++ s8 status)
++{
++ RING_IDX i = queue->tx.rsp_prod_pvt;
++ struct xen_netif_tx_response *resp;
++
++ resp = RING_GET_RESPONSE(&queue->tx, i);
++ resp->id = txp->id;
++ resp->status = status;
++
++ while (extra_count-- != 0)
++ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++
++ queue->tx.rsp_prod_pvt = ++i;
++}
++
++static void push_tx_responses(struct xenvif_queue *queue)
++{
++ int notify;
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
++ if (notify)
++ notify_remote_via_irq(queue->tx_irq);
++}
++
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status)
++ s8 status)
+ {
+ struct pending_tx_info *pending_tx_info;
+ pending_ring_idx_t index;
+@@ -1439,8 +1448,8 @@ static void xenvif_idx_release(struct xe
+
+ spin_lock_irqsave(&queue->response_lock, flags);
+
+- make_tx_response(queue, &pending_tx_info->req,
+- pending_tx_info->extra_count, status);
++ _make_tx_response(queue, &pending_tx_info->req,
++ pending_tx_info->extra_count, status);
+
+ /* Release the pending index before pusing the Tx response so
+ * its available before a new Tx request is pushed by the
+@@ -1454,32 +1463,19 @@ static void xenvif_idx_release(struct xe
+ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+-
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st)
++ s8 status)
+ {
+- RING_IDX i = queue->tx.rsp_prod_pvt;
+- struct xen_netif_tx_response *resp;
+-
+- resp = RING_GET_RESPONSE(&queue->tx, i);
+- resp->id = txp->id;
+- resp->status = st;
+-
+- while (extra_count-- != 0)
+- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++ unsigned long flags;
+
+- queue->tx.rsp_prod_pvt = ++i;
+-}
++ spin_lock_irqsave(&queue->response_lock, flags);
+
+-static void push_tx_responses(struct xenvif_queue *queue)
+-{
+- int notify;
++ _make_tx_response(queue, txp, extra_count, status);
++ push_tx_responses(queue);
+
+- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+- if (notify)
+- notify_remote_via_irq(queue->tx_irq);
++ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)