git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 20 Dec 2021 13:57:15 +0000 (14:57 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 20 Dec 2021 13:57:15 +0000 (14:57 +0100)
added patches:
xen-blkfront-harden-blkfront-against-event-channel-storms.patch
xen-console-harden-hvc_xen-against-event-channel-storms.patch
xen-netback-don-t-queue-unlimited-number-of-packages.patch
xen-netback-fix-rx-queue-stall-detection.patch
xen-netfront-harden-netfront-against-event-channel-storms.patch

queue-4.19/series
queue-4.19/xen-blkfront-harden-blkfront-against-event-channel-storms.patch [new file with mode: 0644]
queue-4.19/xen-console-harden-hvc_xen-against-event-channel-storms.patch [new file with mode: 0644]
queue-4.19/xen-netback-don-t-queue-unlimited-number-of-packages.patch [new file with mode: 0644]
queue-4.19/xen-netback-fix-rx-queue-stall-detection.patch [new file with mode: 0644]
queue-4.19/xen-netfront-harden-netfront-against-event-channel-storms.patch [new file with mode: 0644]

diff --git a/queue-4.19/series b/queue-4.19/series
index 318830e79a784884fccfdabef23a8e7a8fbc9ed8..1eb5a1d79117419eeb75fc2ddbcae57dd0e6388a 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -49,3 +49,8 @@ media-mxl111sf-change-mutex_init-location.patch
 fuse-annotate-lock-in-fuse_reverse_inval_entry.patch
 ovl-fix-warning-in-ovl_create_real.patch
 scsi-scsi_debug-sanity-check-block-descriptor-length-in-resp_mode_select.patch
+xen-blkfront-harden-blkfront-against-event-channel-storms.patch
+xen-netfront-harden-netfront-against-event-channel-storms.patch
+xen-console-harden-hvc_xen-against-event-channel-storms.patch
+xen-netback-fix-rx-queue-stall-detection.patch
+xen-netback-don-t-queue-unlimited-number-of-packages.patch
diff --git a/queue-4.19/xen-blkfront-harden-blkfront-against-event-channel-storms.patch b/queue-4.19/xen-blkfront-harden-blkfront-against-event-channel-storms.patch
new file mode 100644
index 0000000..bc4abbd
--- /dev/null
+++ b/queue-4.19/xen-blkfront-harden-blkfront-against-event-channel-storms.patch
@@ -0,0 +1,76 @@
+From foo@baz Mon Dec 20 02:55:31 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/blkfront: harden blkfront against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 0fd08a34e8e3b67ec9bd8287ac0facf8374b844a upstream.
+
+The Xen blkfront driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using lateeoi event
+channels.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkfront.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1565,9 +1565,12 @@ static irqreturn_t blkif_interrupt(int i
+       unsigned long flags;
+       struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
+       struct blkfront_info *info = rinfo->dev_info;
++      unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+-      if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++      if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++              xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+               return IRQ_HANDLED;
++      }
+       spin_lock_irqsave(&rinfo->ring_lock, flags);
+  again:
+@@ -1583,6 +1586,8 @@ static irqreturn_t blkif_interrupt(int i
+               unsigned long id;
+               unsigned int op;
++              eoiflag = 0;
++
+               RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
+               id = bret.id;
+@@ -1698,6 +1703,8 @@ static irqreturn_t blkif_interrupt(int i
+       spin_unlock_irqrestore(&rinfo->ring_lock, flags);
++      xen_irq_lateeoi(irq, eoiflag);
++
+       return IRQ_HANDLED;
+  err:
+@@ -1705,6 +1712,8 @@ static irqreturn_t blkif_interrupt(int i
+       spin_unlock_irqrestore(&rinfo->ring_lock, flags);
++      /* No EOI in order to avoid further interrupts. */
++
+       pr_alert("%s disabled for further use\n", info->gd->disk_name);
+       return IRQ_HANDLED;
+ }
+@@ -1744,8 +1753,8 @@ static int setup_blkring(struct xenbus_d
+       if (err)
+               goto fail;
+-      err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
+-                                      "blkif", rinfo);
++      err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
++                                              0, "blkif", rinfo);
+       if (err <= 0) {
+               xenbus_dev_fatal(dev, err,
+                                "bind_evtchn_to_irqhandler failed");
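
The hardening pattern in this blkfront patch (and in the netfront and console patches below) is the same: bind the event channel with the lateeoi variant, assume each interrupt is spurious until the handler actually consumes a response from the ring, and report that verdict via xen_irq_lateeoi() so the event channel core can back off on a channel whose events keep carrying no work. Below is a minimal user-space C sketch of that flow; the ring layout, the eoi() helper and the flag value are simplified stand-ins for the Xen/kernel interfaces, not the driver code itself.

/* Minimal user-space model of the lateeoi pattern; the ring, the eoi()
 * helper and the flag value are stand-ins for the Xen interfaces. */
#include <stdio.h>

#define EOI_FLAG_SPURIOUS 1u            /* stands in for XEN_EOI_FLAG_SPURIOUS */

struct ring {
	unsigned int rsp_prod;          /* responses produced by the backend */
	unsigned int rsp_cons;          /* responses consumed by the frontend */
};

/* Stand-in for xen_irq_lateeoi(): signals "interrupt handled" to the event
 * channel core, which may throttle the channel when the flag says spurious. */
static void eoi(unsigned int flags)
{
	printf("EOI issued, flags=%u\n", flags);
}

/* Models the shape of blkif_interrupt(): assume the event was spurious
 * until at least one new response is actually consumed from the ring. */
static void interrupt_handler(struct ring *r)
{
	unsigned int eoiflag = EOI_FLAG_SPURIOUS;

	while (r->rsp_cons != r->rsp_prod) {
		eoiflag = 0;            /* real work seen: normal EOI */
		r->rsp_cons++;          /* consume one response */
	}

	eoi(eoiflag);                   /* EOI exactly once, after processing */
}

int main(void)
{
	struct ring r = { .rsp_prod = 2, .rsp_cons = 0 };

	interrupt_handler(&r);          /* two responses pending: normal EOI */
	interrupt_handler(&r);          /* nothing new: flagged as spurious */
	return 0;
}

Run as-is, the first call reports real work (flag 0) and the second, with nothing left to consume, reports a spurious event.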
diff --git a/queue-4.19/xen-console-harden-hvc_xen-against-event-channel-storms.patch b/queue-4.19/xen-console-harden-hvc_xen-against-event-channel-storms.patch
new file mode 100644
index 0000000..f8fc5d2
--- /dev/null
+++ b/queue-4.19/xen-console-harden-hvc_xen-against-event-channel-storms.patch
@@ -0,0 +1,98 @@
+From foo@baz Mon Dec 20 02:55:31 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/console: harden hvc_xen against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+This is commit fe415186b43df0db1f17fa3a46275fd92107fe71 upstream.
+
+The Xen console driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using a lateeoi event
+channel.
+
+For the normal domU initial console this requires the introduction of
+bind_evtchn_to_irq_lateeoi() as there is no xenbus device available
+at the time the event channel is bound to the irq.
+
+As deciding whether an interrupt was spurious requires testing whether
+bytes have been read from the backend, move sending the event into the
+if statement: sending an event without having found any bytes to read
+makes no sense at all.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/hvc/hvc_xen.c |   30 +++++++++++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -37,6 +37,8 @@ struct xencons_info {
+       struct xenbus_device *xbdev;
+       struct xencons_interface *intf;
+       unsigned int evtchn;
++      XENCONS_RING_IDX out_cons;
++      unsigned int out_cons_same;
+       struct hvc_struct *hvc;
+       int irq;
+       int vtermno;
+@@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vt
+       XENCONS_RING_IDX cons, prod;
+       int recv = 0;
+       struct xencons_info *xencons = vtermno_to_xencons(vtermno);
++      unsigned int eoiflag = 0;
++
+       if (xencons == NULL)
+               return -EINVAL;
+       intf = xencons->intf;
+@@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vt
+       mb();                   /* read ring before consuming */
+       intf->in_cons = cons;
+-      notify_daemon(xencons);
++      /*
++       * When to mark interrupt having been spurious:
++       * - there was no new data to be read, and
++       * - the backend did not consume some output bytes, and
++       * - the previous round with no read data didn't see consumed bytes
++       *   (we might have a race with an interrupt being in flight while
++       *   updating xencons->out_cons, so account for that by allowing one
++       *   round without any visible reason)
++       */
++      if (intf->out_cons != xencons->out_cons) {
++              xencons->out_cons = intf->out_cons;
++              xencons->out_cons_same = 0;
++      }
++      if (recv) {
++              notify_daemon(xencons);
++      } else if (xencons->out_cons_same++ > 1) {
++              eoiflag = XEN_EOI_FLAG_SPURIOUS;
++      }
++
++      xen_irq_lateeoi(xencons->irq, eoiflag);
++
+       return recv;
+ }
+@@ -386,7 +410,7 @@ static int xencons_connect_backend(struc
+       if (ret)
+               return ret;
+       info->evtchn = evtchn;
+-      irq = bind_evtchn_to_irq(evtchn);
++      irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
+       if (irq < 0)
+               return irq;
+       info->irq = irq;
+@@ -550,7 +574,7 @@ static int __init xen_hvc_init(void)
+                       return r;
+               info = vtermno_to_xencons(HVC_COOKIE);
+-              info->irq = bind_evtchn_to_irq(info->evtchn);
++              info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+       }
+       if (info->irq < 0)
+               info->irq = 0; /* NO_IRQ */
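
The console case cannot rely on ring responses alone: an event can also legitimately signal that the backend consumed output bytes. The heuristic added above therefore treats an event as spurious only if it brought no input, the backend made no output progress, and the same already happened in the previous round (one idle round is tolerated to cover an interrupt already in flight while out_cons is updated). The following is a small stand-alone model of that decision; the state fields and flag value are stand-ins, not the hvc_xen code.

/* User-space model of the spurious-event heuristic in domU_read_console();
 * the console state and flag value are illustrative stand-ins. */
#include <stdio.h>

#define EOI_FLAG_SPURIOUS 1u

struct console_state {
	unsigned int out_cons;          /* backend output consumer index, as last seen */
	unsigned int out_cons_same;     /* idle rounds in a row without output progress */
};

/* Returns the EOI flag for one event: 0 while the event is productive,
 * EOI_FLAG_SPURIOUS once there was neither input nor output progress for
 * more than one round (the free round covers a racing in-flight event). */
static unsigned int classify_event(struct console_state *s,
				   unsigned int backend_out_cons,
				   unsigned int bytes_read)
{
	unsigned int eoiflag = 0;

	if (backend_out_cons != s->out_cons) {
		s->out_cons = backend_out_cons; /* backend consumed some output */
		s->out_cons_same = 0;
	}
	if (!bytes_read && s->out_cons_same++ > 1)
		eoiflag = EOI_FLAG_SPURIOUS;    /* idle for more than one round */

	return eoiflag;
}

int main(void)
{
	struct console_state s = { 0, 0 };

	/* Three events with no input and no output progress: the first two
	 * are tolerated, the third is reported as spurious. */
	for (int i = 0; i < 3; i++)
		printf("event %d -> flag %u\n", i, classify_event(&s, 0, 0));
	return 0;
}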
diff --git a/queue-4.19/xen-netback-don-t-queue-unlimited-number-of-packages.patch b/queue-4.19/xen-netback-don-t-queue-unlimited-number-of-packages.patch
new file mode 100644
index 0000000..b12d54f
--- /dev/null
+++ b/queue-4.19/xen-netback-don-t-queue-unlimited-number-of-packages.patch
@@ -0,0 +1,73 @@
+From foo@baz Mon Dec 20 02:55:31 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 30 Nov 2021 08:36:12 +0100
+Subject: xen/netback: don't queue unlimited number of packages
+
+From: Juergen Gross <jgross@suse.com>
+
+commit be81992f9086b230623ae3ebbc85ecee4d00a3d3 upstream.
+
+In case a guest isn't consuming incoming network traffic as fast as it
+is coming in, xen-netback currently buffers network packets in unlimited
+numbers. This can result in host OOM situations.
+
+Commit f48da8b14d04ca8 ("xen-netback: fix unlimited guest Rx internal
+queue and carrier flapping") meant to introduce a mechanism to limit
+the amount of buffered data by stopping the Tx queue when reaching the
+data limit, but this doesn't work for cases like UDP.
+
+When hitting the limit, don't queue further SKBs but drop them instead.
+In order to be able to tell that Rx packets have been dropped, increment
+the rx_dropped statistics counter in this case.
+
+It should be noted that the old solution of continuing to queue SKBs had
+the additional problem that an overflow of the 32-bit rx_queue_len value
+would result in the Tx queue being enabled intermittently.
+
+This is part of XSA-392
+
+Fixes: f48da8b14d04ca8 ("xen-netback: fix unlimited guest Rx internal queue and carrier flapping")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/rx.c |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -88,16 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+-      if (skb_queue_empty(&queue->rx_queue))
+-              xenvif_update_needed_slots(queue, skb);
+-
+-      __skb_queue_tail(&queue->rx_queue, skb);
+-
+-      queue->rx_queue_len += skb->len;
+-      if (queue->rx_queue_len > queue->rx_queue_max) {
++      if (queue->rx_queue_len >= queue->rx_queue_max) {
+               struct net_device *dev = queue->vif->dev;
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
++              kfree_skb(skb);
++              queue->vif->dev->stats.rx_dropped++;
++      } else {
++              if (skb_queue_empty(&queue->rx_queue))
++                      xenvif_update_needed_slots(queue, skb);
++
++              __skb_queue_tail(&queue->rx_queue, skb);
++
++              queue->rx_queue_len += skb->len;
+       }
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+@@ -147,6 +150,7 @@ static void xenvif_rx_queue_drop_expired
+                       break;
+               xenvif_rx_dequeue(queue);
+               kfree_skb(skb);
++              queue->vif->dev->stats.rx_dropped++;
+       }
+ }
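
The fix bounds the guest RX queue by byte count: once rx_queue_len has reached rx_queue_max, the TX queue is stopped for backpressure and any further SKB is dropped and accounted in rx_dropped instead of being queued without limit. A stand-alone sketch of that bounded-queue behaviour follows; the packet and queue types, the byte limit and the statistics field are illustrative stand-ins, not the netback structures.

/* User-space model of the bounded guest-RX queue; types, limits and
 * counters are stand-ins for netback's rx_queue fields. */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	struct packet *next;
	unsigned int len;
};

struct rx_queue {
	struct packet *head, *tail;
	unsigned long queued_bytes;     /* models queue->rx_queue_len */
	unsigned long max_bytes;        /* models queue->rx_queue_max */
	unsigned long rx_dropped;       /* models dev->stats.rx_dropped */
	int tx_stopped;                 /* models netif_tx_stop_queue() */
};

/* Instead of queueing without bound (risking a host OOM), stop the TX
 * queue for backpressure and drop the packet once the limit is reached. */
static void rx_queue_tail(struct rx_queue *q, struct packet *p)
{
	if (q->queued_bytes >= q->max_bytes) {
		q->tx_stopped = 1;      /* backpressure for stream traffic */
		q->rx_dropped++;        /* account the drop (e.g. UDP floods) */
		free(p);
		return;
	}

	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->queued_bytes += p->len;
}

int main(void)
{
	struct rx_queue q = { .max_bytes = 3000 };

	for (int i = 0; i < 4; i++) {
		struct packet *p = calloc(1, sizeof(*p));
		p->len = 1500;
		rx_queue_tail(&q, p);   /* packets 3 and 4 exceed the limit */
	}
	printf("queued=%lu bytes dropped=%lu tx_stopped=%d\n",
	       q.queued_bytes, q.rx_dropped, q.tx_stopped);

	while (q.head) {                /* release what stayed queued */
		struct packet *p = q.head;
		q.head = p->next;
		free(p);
	}
	return 0;
}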
diff --git a/queue-4.19/xen-netback-fix-rx-queue-stall-detection.patch b/queue-4.19/xen-netback-fix-rx-queue-stall-detection.patch
new file mode 100644
index 0000000..59a72bf
--- /dev/null
+++ b/queue-4.19/xen-netback-fix-rx-queue-stall-detection.patch
@@ -0,0 +1,163 @@
+From foo@baz Mon Dec 20 02:55:31 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:25:12 +0100
+Subject: xen/netback: fix rx queue stall detection
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 6032046ec4b70176d247a71836186d47b25d1684 upstream.
+
+Commit 1d5d48523900a4b ("xen-netback: require fewer guest Rx slots when
+not using GSO") introduced a security problem in netback, as an
+interface would only be regarded as stalled if no slot is available in
+the rx queue ring page. In case the SKB at the head of the queued
+requests needs more than one rx slot and only one slot is free, the
+stall detection logic will never trigger, as the test only looks for at
+least one slot to be free.
+
+Fix that by testing for the needed number of slots instead of only one
+slot being available.
+
+In order to not have to take the rx queue lock that often, store the
+number of needed slots in the queue data. As all SKB dequeue operations
+happen in the rx queue kernel thread this is safe, as long as the
+number of needed slots is accessed via READ/WRITE_ONCE() only and
+updates are always done with the rx queue lock held.
+
+Add a small helper for obtaining the number of free slots.
+
+This is part of XSA-392
+
+Fixes: 1d5d48523900a4b ("xen-netback: require fewer guest Rx slots when not using GSO")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/common.h |    1 
+ drivers/net/xen-netback/rx.c     |   65 ++++++++++++++++++++++++---------------
+ 2 files changed, 42 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data
+       unsigned int rx_queue_max;
+       unsigned int rx_queue_len;
+       unsigned long last_rx_time;
++      unsigned int rx_slots_needed;
+       bool stalled;
+       struct xenvif_copy_state rx_copy;
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -33,28 +33,36 @@
+ #include <xen/xen.h>
+ #include <xen/events.h>
+-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
++/*
++ * Update the needed ring page slots for the first SKB queued.
++ * Note that any call sequence outside the RX thread calling this function
++ * needs to wake up the RX thread via a call of xenvif_kick_thread()
++ * afterwards in order to avoid a race with putting the thread to sleep.
++ */
++static void xenvif_update_needed_slots(struct xenvif_queue *queue,
++                                     const struct sk_buff *skb)
+ {
+-      RING_IDX prod, cons;
+-      struct sk_buff *skb;
+-      int needed;
+-      unsigned long flags;
++      unsigned int needed = 0;
+-      spin_lock_irqsave(&queue->rx_queue.lock, flags);
+-
+-      skb = skb_peek(&queue->rx_queue);
+-      if (!skb) {
+-              spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+-              return false;
++      if (skb) {
++              needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
++              if (skb_is_gso(skb))
++                      needed++;
++              if (skb->sw_hash)
++                      needed++;
+       }
+-      needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+-      if (skb_is_gso(skb))
+-              needed++;
+-      if (skb->sw_hash)
+-              needed++;
++      WRITE_ONCE(queue->rx_slots_needed, needed);
++}
+-      spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
++static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
++{
++      RING_IDX prod, cons;
++      unsigned int needed;
++
++      needed = READ_ONCE(queue->rx_slots_needed);
++      if (!needed)
++              return false;
+       do {
+               prod = queue->rx.sring->req_prod;
+@@ -80,6 +88,9 @@ void xenvif_rx_queue_tail(struct xenvif_
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
++      if (skb_queue_empty(&queue->rx_queue))
++              xenvif_update_needed_slots(queue, skb);
++
+       __skb_queue_tail(&queue->rx_queue, skb);
+       queue->rx_queue_len += skb->len;
+@@ -100,6 +111,8 @@ static struct sk_buff *xenvif_rx_dequeue
+       skb = __skb_dequeue(&queue->rx_queue);
+       if (skb) {
++              xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
++
+               queue->rx_queue_len -= skb->len;
+               if (queue->rx_queue_len < queue->rx_queue_max) {
+                       struct netdev_queue *txq;
+@@ -474,27 +487,31 @@ void xenvif_rx_action(struct xenvif_queu
+       xenvif_rx_copy_flush(queue);
+ }
+-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
++static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
+ {
+       RING_IDX prod, cons;
+       prod = queue->rx.sring->req_prod;
+       cons = queue->rx.req_cons;
++      return prod - cons;
++}
++
++static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
++{
++      unsigned int needed = READ_ONCE(queue->rx_slots_needed);
++
+       return !queue->stalled &&
+-              prod - cons < 1 &&
++              xenvif_rx_queue_slots(queue) < needed &&
+               time_after(jiffies,
+                          queue->last_rx_time + queue->vif->stall_timeout);
+ }
+ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+ {
+-      RING_IDX prod, cons;
+-
+-      prod = queue->rx.sring->req_prod;
+-      cons = queue->rx.req_cons;
++      unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+-      return queue->stalled && prod - cons >= 1;
++      return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
+ }
+ bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
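
The core of this fix is that stall detection now compares the free ring slots against the number of slots the packet at the head of the queue actually needs, cached in rx_slots_needed, rather than against the constant 1. The sketch below models the slot computation and the stalled/ready checks; the types are simplified stand-ins, and the real xenvif_rx_queue_stalled() additionally requires the stall timeout to have expired.

/* User-space model of the fixed stall/ready checks; the GSO and hash
 * extras are simplified and the timeout condition is omitted. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct rx_ring {
	unsigned int req_prod;          /* requests posted by the guest */
	unsigned int req_cons;          /* requests consumed by the backend */
};

struct queue_state {
	struct rx_ring ring;
	unsigned int slots_needed;      /* models queue->rx_slots_needed */
	bool stalled;
};

/* Models xenvif_update_needed_slots(): one slot per page of payload,
 * plus one extra slot each for a GSO prefix and a hash extra. */
static void update_needed_slots(struct queue_state *q, unsigned int pkt_len,
				bool gso, bool hashed)
{
	unsigned int needed = 0;

	if (pkt_len) {
		needed = (pkt_len + PAGE_SIZE - 1) / PAGE_SIZE;
		if (gso)
			needed++;
		if (hashed)
			needed++;
	}
	q->slots_needed = needed;
}

static unsigned int free_slots(const struct queue_state *q)
{
	return q->ring.req_prod - q->ring.req_cons;
}

/* The old code effectively tested "free_slots < 1"; with a multi-slot
 * packet at the head of the queue and exactly one slot free, that test
 * never fired and the interface was never marked as stalled. */
static bool queue_stalled(const struct queue_state *q)
{
	return !q->stalled && free_slots(q) < q->slots_needed;
}

static bool queue_ready(const struct queue_state *q)
{
	return q->stalled && free_slots(q) >= q->slots_needed;
}

int main(void)
{
	struct queue_state q = { .ring = { .req_prod = 1, .req_cons = 0 } };

	update_needed_slots(&q, 3 * PAGE_SIZE, false, false);   /* needs 3 slots */
	printf("stalled? %d (one slot free, three needed)\n", queue_stalled(&q));

	q.stalled = true;
	q.ring.req_prod = 4;            /* guest posts three more requests */
	printf("ready?   %d\n", queue_ready(&q));
	return 0;
}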
diff --git a/queue-4.19/xen-netfront-harden-netfront-against-event-channel-storms.patch b/queue-4.19/xen-netfront-harden-netfront-against-event-channel-storms.patch
new file mode 100644
index 0000000..39703a8
--- /dev/null
+++ b/queue-4.19/xen-netfront-harden-netfront-against-event-channel-storms.patch
@@ -0,0 +1,293 @@
+From foo@baz Mon Dec 20 02:55:31 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/netfront: harden netfront against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b27d47950e481f292c0a5ad57357edb9d95d03ba upstream.
+
+The Xen netfront driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using lateeoi event
+channels.
+
+To be able to detect the case of no rx responses being added while the
+carrier is down, a new lock is needed in order to update and test
+rsp_cons and the number of seen unconsumed responses atomically.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c |  125 +++++++++++++++++++++++++++++++++------------
+ 1 file changed, 94 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -142,6 +142,9 @@ struct netfront_queue {
+       struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+       grant_ref_t gref_rx_head;
+       grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++
++      unsigned int rx_rsp_unconsumed;
++      spinlock_t rx_cons_lock;
+ };
+ struct netfront_info {
+@@ -366,12 +369,13 @@ static int xennet_open(struct net_device
+       return 0;
+ }
+-static void xennet_tx_buf_gc(struct netfront_queue *queue)
++static bool xennet_tx_buf_gc(struct netfront_queue *queue)
+ {
+       RING_IDX cons, prod;
+       unsigned short id;
+       struct sk_buff *skb;
+       bool more_to_do;
++      bool work_done = false;
+       const struct device *dev = &queue->info->netdev->dev;
+       BUG_ON(!netif_carrier_ok(queue->info->netdev));
+@@ -388,6 +392,8 @@ static void xennet_tx_buf_gc(struct netf
+               for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
+                       struct xen_netif_tx_response txrsp;
++                      work_done = true;
++
+                       RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+                       if (txrsp.status == XEN_NETIF_RSP_NULL)
+                               continue;
+@@ -431,11 +437,13 @@ static void xennet_tx_buf_gc(struct netf
+       xennet_maybe_wake_tx(queue);
+-      return;
++      return work_done;
+  err:
+       queue->info->broken = true;
+       dev_alert(dev, "Disabled for further use\n");
++
++      return work_done;
+ }
+ struct xennet_gnttab_make_txreq {
+@@ -756,6 +764,16 @@ static int xennet_close(struct net_devic
+       return 0;
+ }
++static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&queue->rx_cons_lock, flags);
++      queue->rx.rsp_cons = val;
++      queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
++      spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
++}
++
+ static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
+                               grant_ref_t ref)
+ {
+@@ -807,7 +825,7 @@ static int xennet_get_extras(struct netf
+               xennet_move_rx_slot(queue, skb, ref);
+       } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
+-      queue->rx.rsp_cons = cons;
++      xennet_set_rx_rsp_cons(queue, cons);
+       return err;
+ }
+@@ -887,7 +905,7 @@ next:
+       }
+       if (unlikely(err))
+-              queue->rx.rsp_cons = cons + slots;
++              xennet_set_rx_rsp_cons(queue, cons + slots);
+       return err;
+ }
+@@ -941,7 +959,8 @@ static int xennet_fill_frags(struct netf
+                       __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+               }
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+-                      queue->rx.rsp_cons = ++cons + skb_queue_len(list);
++                      xennet_set_rx_rsp_cons(queue,
++                                             ++cons + skb_queue_len(list));
+                       kfree_skb(nskb);
+                       return -ENOENT;
+               }
+@@ -954,7 +973,7 @@ static int xennet_fill_frags(struct netf
+               kfree_skb(nskb);
+       }
+-      queue->rx.rsp_cons = cons;
++      xennet_set_rx_rsp_cons(queue, cons);
+       return 0;
+ }
+@@ -1075,7 +1094,9 @@ err:
+                       if (unlikely(xennet_set_skb_gso(skb, gso))) {
+                               __skb_queue_head(&tmpq, skb);
+-                              queue->rx.rsp_cons += skb_queue_len(&tmpq);
++                              xennet_set_rx_rsp_cons(queue,
++                                                     queue->rx.rsp_cons +
++                                                     skb_queue_len(&tmpq));
+                               goto err;
+                       }
+               }
+@@ -1099,7 +1120,8 @@ err:
+               __skb_queue_tail(&rxq, skb);
+-              i = ++queue->rx.rsp_cons;
++              i = queue->rx.rsp_cons + 1;
++              xennet_set_rx_rsp_cons(queue, i);
+               work_done++;
+       }
+@@ -1261,40 +1283,79 @@ static int xennet_set_features(struct ne
+       return 0;
+ }
+-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
++static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
+ {
+-      struct netfront_queue *queue = dev_id;
+       unsigned long flags;
+-      if (queue->info->broken)
+-              return IRQ_HANDLED;
++      if (unlikely(queue->info->broken))
++              return false;
+       spin_lock_irqsave(&queue->tx_lock, flags);
+-      xennet_tx_buf_gc(queue);
++      if (xennet_tx_buf_gc(queue))
++              *eoi = 0;
+       spin_unlock_irqrestore(&queue->tx_lock, flags);
++      return true;
++}
++
++static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
++{
++      unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++      if (likely(xennet_handle_tx(dev_id, &eoiflag)))
++              xen_irq_lateeoi(irq, eoiflag);
++
+       return IRQ_HANDLED;
+ }
+-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
++static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
+ {
+-      struct netfront_queue *queue = dev_id;
+-      struct net_device *dev = queue->info->netdev;
++      unsigned int work_queued;
++      unsigned long flags;
++
++      if (unlikely(queue->info->broken))
++              return false;
+-      if (queue->info->broken)
+-              return IRQ_HANDLED;
++      spin_lock_irqsave(&queue->rx_cons_lock, flags);
++      work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
++      if (work_queued > queue->rx_rsp_unconsumed) {
++              queue->rx_rsp_unconsumed = work_queued;
++              *eoi = 0;
++      } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
++              const struct device *dev = &queue->info->netdev->dev;
++
++              spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
++              dev_alert(dev, "RX producer index going backwards\n");
++              dev_alert(dev, "Disabled for further use\n");
++              queue->info->broken = true;
++              return false;
++      }
++      spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+-      if (likely(netif_carrier_ok(dev) &&
+-                 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
++      if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
+               napi_schedule(&queue->napi);
++      return true;
++}
++
++static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
++{
++      unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++      if (likely(xennet_handle_rx(dev_id, &eoiflag)))
++              xen_irq_lateeoi(irq, eoiflag);
++
+       return IRQ_HANDLED;
+ }
+ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+ {
+-      xennet_tx_interrupt(irq, dev_id);
+-      xennet_rx_interrupt(irq, dev_id);
++      unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++      if (xennet_handle_tx(dev_id, &eoiflag) &&
++          xennet_handle_rx(dev_id, &eoiflag))
++              xen_irq_lateeoi(irq, eoiflag);
++
+       return IRQ_HANDLED;
+ }
+@@ -1528,9 +1589,10 @@ static int setup_netfront_single(struct
+       if (err < 0)
+               goto fail;
+-      err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
+-                                      xennet_interrupt,
+-                                      0, queue->info->netdev->name, queue);
++      err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
++                                              xennet_interrupt, 0,
++                                              queue->info->netdev->name,
++                                              queue);
+       if (err < 0)
+               goto bind_fail;
+       queue->rx_evtchn = queue->tx_evtchn;
+@@ -1558,18 +1620,18 @@ static int setup_netfront_split(struct n
+       snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+                "%s-tx", queue->name);
+-      err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
+-                                      xennet_tx_interrupt,
+-                                      0, queue->tx_irq_name, queue);
++      err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
++                                              xennet_tx_interrupt, 0,
++                                              queue->tx_irq_name, queue);
+       if (err < 0)
+               goto bind_tx_fail;
+       queue->tx_irq = err;
+       snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+                "%s-rx", queue->name);
+-      err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
+-                                      xennet_rx_interrupt,
+-                                      0, queue->rx_irq_name, queue);
++      err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
++                                              xennet_rx_interrupt, 0,
++                                              queue->rx_irq_name, queue);
+       if (err < 0)
+               goto bind_rx_fail;
+       queue->rx_irq = err;
+@@ -1671,6 +1733,7 @@ static int xennet_init_queue(struct netf
+       spin_lock_init(&queue->tx_lock);
+       spin_lock_init(&queue->rx_lock);
++      spin_lock_init(&queue->rx_cons_lock);
+       timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
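
In netfront the spurious test for the RX path needs the new rx_cons_lock: rsp_cons updates and the cached count of unconsumed responses must be read and written atomically with respect to the interrupt handler, which clears the spurious flag only when the backend made new responses visible and marks the device broken if the producer index ever appears to move backwards. Below is a stand-alone model of that accounting; a pthread mutex stands in for the spinlock, and the ring and flag values are stand-ins for the kernel/Xen interfaces.

/* User-space model of the netfront RX interrupt accounting; the lock,
 * ring indices and flag value are illustrative stand-ins. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define EOI_FLAG_SPURIOUS 1u

struct rx_state {
	pthread_mutex_t lock;           /* models queue->rx_cons_lock */
	unsigned int rsp_prod;          /* advanced by the backend */
	unsigned int rsp_cons;          /* advanced as responses are consumed */
	unsigned int rsp_unconsumed;    /* models queue->rx_rsp_unconsumed */
	bool broken;
};

/* Models xennet_set_rx_rsp_cons(): the consumer index and the cached
 * unconsumed count are updated together, under the lock. */
static void set_rsp_cons(struct rx_state *s, unsigned int val)
{
	pthread_mutex_lock(&s->lock);
	s->rsp_cons = val;
	s->rsp_unconsumed = s->rsp_prod - s->rsp_cons;
	pthread_mutex_unlock(&s->lock);
}

/* Models xennet_handle_rx(): only an event that made new responses
 * visible clears the spurious flag; a producer index apparently moving
 * backwards marks the device broken. Returns false if no EOI is due. */
static bool handle_rx_event(struct rx_state *s, unsigned int *eoiflag)
{
	unsigned int work_queued;

	if (s->broken)
		return false;

	pthread_mutex_lock(&s->lock);
	work_queued = s->rsp_prod - s->rsp_cons;
	if (work_queued > s->rsp_unconsumed) {
		s->rsp_unconsumed = work_queued;
		*eoiflag = 0;           /* genuinely new responses */
	} else if (work_queued < s->rsp_unconsumed) {
		s->broken = true;       /* backend misbehaving: stop using it */
		pthread_mutex_unlock(&s->lock);
		return false;
	}
	pthread_mutex_unlock(&s->lock);
	return true;                    /* caller issues the (late) EOI */
}

int main(void)
{
	struct rx_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned int flag;

	s.rsp_prod = 2;                 /* backend adds two responses */
	flag = EOI_FLAG_SPURIOUS;
	handle_rx_event(&s, &flag);
	printf("first event:  flag=%u\n", flag);        /* real work */

	flag = EOI_FLAG_SPURIOUS;
	handle_rx_event(&s, &flag);
	printf("second event: flag=%u\n", flag);        /* nothing new */

	set_rsp_cons(&s, 2);            /* NAPI poll consumes both responses */
	return 0;
}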