--- /dev/null
+From foo@baz Mon Dec 20 02:39:52 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/blkfront: harden blkfront against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 0fd08a34e8e3b67ec9bd8287ac0facf8374b844a upstream.
+
+The Xen blkfront driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using lateeoi event
+channels.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkfront.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
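+For reference, a minimal stand-alone sketch of the lateeoi discipline the
+hunks below apply: the handler starts out assuming the interrupt was
+spurious, clears that assumption as soon as it finds real work, and only
+signals the EOI at the end (or not at all on a fatal error, so a storming
+backend gets no further interrupts). All names here are stubs modelled on
+the kernel ones, not the real Xen API.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define XEN_EOI_FLAG_SPURIOUS 1u    /* stand-in for the kernel flag */
+#define IRQ_HANDLED 1               /* stand-in for the kernel return code */
+
+/* stub: the real xen_irq_lateeoi() re-enables event delivery and lets the
+ * event channel core throttle the channel if it was flagged as spurious */
+static void xen_irq_lateeoi(int irq, unsigned int eoiflag)
+{
+    printf("irq %d: EOI, flags %#x\n", irq, eoiflag);
+}
+
+static int pending;                              /* fake ring state */
+static bool response_pending(void) { return pending > 0; }
+static void consume_response(void) { pending--; }
+static bool fatal_ring_error(void) { return false; }
+
+static int interrupt_handler(int irq)
+{
+    unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+    while (response_pending()) {
+        eoiflag = 0;                 /* real work seen: not spurious */
+        if (fatal_ring_error())
+            return IRQ_HANDLED;      /* no EOI: no further interrupts */
+        consume_response();
+    }
+
+    xen_irq_lateeoi(irq, eoiflag);
+    return IRQ_HANDLED;
+}
+
+int main(void)
+{
+    pending = 2;
+    interrupt_handler(42);           /* work found: EOI with flags 0 */
+    interrupt_handler(42);           /* nothing to do: flagged spurious */
+    return 0;
+}
+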
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1319,11 +1319,13 @@ static irqreturn_t blkif_interrupt(int i
+ unsigned long flags;
+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int error;
++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ spin_lock_irqsave(&info->io_lock, flags);
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ spin_unlock_irqrestore(&info->io_lock, flags);
++ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ return IRQ_HANDLED;
+ }
+
+@@ -1340,6 +1342,8 @@ static irqreturn_t blkif_interrupt(int i
+ unsigned long id;
+ unsigned int op;
+
++ eoiflag = 0;
++
+ RING_COPY_RESPONSE(&info->ring, i, &bret);
+ id = bret.id;
+
+@@ -1444,6 +1448,8 @@ static irqreturn_t blkif_interrupt(int i
+
+ spin_unlock_irqrestore(&info->io_lock, flags);
+
++ xen_irq_lateeoi(irq, eoiflag);
++
+ return IRQ_HANDLED;
+
+ err:
+@@ -1451,6 +1457,8 @@ static irqreturn_t blkif_interrupt(int i
+
+ spin_unlock_irqrestore(&info->io_lock, flags);
+
++ /* No EOI in order to avoid further interrupts. */
++
+ pr_alert("%s disabled for further use\n", info->gd->disk_name);
+ return IRQ_HANDLED;
+ }
+@@ -1489,8 +1497,8 @@ static int setup_blkring(struct xenbus_d
+ if (err)
+ goto fail;
+
+- err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
+- "blkif", info);
++ err = bind_evtchn_to_irqhandler_lateeoi(info->evtchn, blkif_interrupt,
++ 0, "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_evtchn_to_irqhandler failed");
--- /dev/null
+From foo@baz Mon Dec 20 02:39:52 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/console: harden hvc_xen against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+commit fe415186b43df0db1f17fa3a46275fd92107fe71 upstream.
+
+The Xen console driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using a lateeoi event
+channel.
+
+For the normal domU initial console this requires the introduction of
+bind_evtchn_to_irq_lateeoi() as there is no xenbus device available
+at the time the event channel is bound to the irq.
+
+As deciding whether an interrupt was spurious requires testing whether
+any bytes have been read from the backend, move sending the event into
+the if statement; sending an event without having found any bytes to
+read makes no sense.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/hvc/hvc_xen.c | 30 +++++++++++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 3 deletions(-)
+
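+The spurious-interrupt heuristic added below can be read in isolation as
+the following stand-alone model (stub names, not the real driver code):
+an interrupt is only flagged as spurious once neither new input nor newly
+consumed output has been seen for more than one consecutive round, which
+tolerates a single racing in-flight event.
+
+#include <stdio.h>
+
+#define XEN_EOI_FLAG_SPURIOUS 1u    /* stand-in for the kernel flag */
+
+struct console_state {
+    unsigned int out_cons;          /* last backend consumer index seen */
+    unsigned int out_cons_same;     /* rounds without visible progress */
+};
+
+/* returns the EOI flags to use for one interrupt round */
+static unsigned int classify_round(struct console_state *s,
+                                   unsigned int backend_out_cons,
+                                   int bytes_read)
+{
+    unsigned int eoiflag = 0;
+
+    if (backend_out_cons != s->out_cons) {      /* output was consumed */
+        s->out_cons = backend_out_cons;
+        s->out_cons_same = 0;
+    }
+    if (!bytes_read && s->out_cons_same++ > 1)  /* third idle round */
+        eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+    return eoiflag;
+}
+
+int main(void)
+{
+    struct console_state s = { 0, 0 };
+    int round;
+
+    /* no input and no output progress: round 3 gets flagged spurious */
+    for (round = 1; round <= 3; round++)
+        printf("round %d -> flags %#x\n", round, classify_round(&s, 0, 0));
+    return 0;
+}
+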
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -49,6 +49,8 @@ struct xencons_info {
+ struct xenbus_device *xbdev;
+ struct xencons_interface *intf;
+ unsigned int evtchn;
++ XENCONS_RING_IDX out_cons;
++ unsigned int out_cons_same;
+ struct hvc_struct *hvc;
+ int irq;
+ int vtermno;
+@@ -150,6 +152,8 @@ static int domU_read_console(uint32_t vt
+ XENCONS_RING_IDX cons, prod;
+ int recv = 0;
+ struct xencons_info *xencons = vtermno_to_xencons(vtermno);
++ unsigned int eoiflag = 0;
++
+ if (xencons == NULL)
+ return -EINVAL;
+ intf = xencons->intf;
+@@ -169,7 +173,27 @@ static int domU_read_console(uint32_t vt
+ mb(); /* read ring before consuming */
+ intf->in_cons = cons;
+
+- notify_daemon(xencons);
++ /*
++ * When to mark interrupt having been spurious:
++ * - there was no new data to be read, and
++ * - the backend did not consume some output bytes, and
++ * - the previous round with no read data didn't see consumed bytes
++ * (we might have a race with an interrupt being in flight while
++ * updating xencons->out_cons, so account for that by allowing one
++ * round without any visible reason)
++ */
++ if (intf->out_cons != xencons->out_cons) {
++ xencons->out_cons = intf->out_cons;
++ xencons->out_cons_same = 0;
++ }
++ if (recv) {
++ notify_daemon(xencons);
++ } else if (xencons->out_cons_same++ > 1) {
++ eoiflag = XEN_EOI_FLAG_SPURIOUS;
++ }
++
++ xen_irq_lateeoi(xencons->irq, eoiflag);
++
+ return recv;
+ }
+
+@@ -391,7 +415,7 @@ static int xencons_connect_backend(struc
+ if (ret)
+ return ret;
+ info->evtchn = evtchn;
+- irq = bind_evtchn_to_irq(evtchn);
++ irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
+ if (irq < 0)
+ return irq;
+ info->irq = irq;
+@@ -555,7 +579,7 @@ static int __init xen_hvc_init(void)
+ return r;
+
+ info = vtermno_to_xencons(HVC_COOKIE);
+- info->irq = bind_evtchn_to_irq(info->evtchn);
++ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+ }
+ if (info->irq < 0)
+ info->irq = 0; /* NO_IRQ */
--- /dev/null
+From foo@baz Mon Dec 20 02:39:52 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 30 Nov 2021 08:36:12 +0100
+Subject: xen/netback: don't queue unlimited number of packages
+
+From: Juergen Gross <jgross@suse.com>
+
+commit be81992f9086b230623ae3ebbc85ecee4d00a3d3 upstream.
+
+In case a guest isn't consuming incoming network traffic as fast as it
+is arriving, xen-netback today buffers network packets in unlimited
+numbers. This can result in host OOM situations.
+
+Commit f48da8b14d04ca8 ("xen-netback: fix unlimited guest Rx internal
+queue and carrier flapping") meant to introduce a mechanism to limit
+the amount of buffered data by stopping the Tx queue when reaching the
+data limit, but this doesn't work for cases like UDP.
+
+When hitting the limit, don't queue further SKBs, but drop them instead.
+To be able to tell that Rx packets have been dropped, increment the
+rx_dropped statistics counter in this case.
+
+It should be noted that the old approach of continuing to queue SKBs had
+the additional problem that an overflow of the 32-bit rx_queue_len value
+would result in the Tx queue being enabled intermittently.
+
+This is part of XSA-392
+
+Fixes: f48da8b14d04ca8 ("xen-netback: fix unlimited guest Rx internal queue and carrier flapping")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
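+In outline, the queueing change below amounts to the following stand-alone
+model (hypothetical names, not the netback data structures): once the
+amount of buffered data has reached the per-queue limit, further packets
+are dropped and counted as rx_dropped instead of being buffered without
+bound.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+struct rx_queue_model {
+    unsigned long len;       /* bytes currently buffered (rx_queue_len) */
+    unsigned long max;       /* buffering limit (rx_queue_max) */
+    unsigned long dropped;   /* dropped-packet counter (rx_dropped) */
+};
+
+/* returns true if the packet was queued, false if it was dropped */
+static bool queue_tail(struct rx_queue_model *q, unsigned long skb_len)
+{
+    if (q->len >= q->max) {
+        /* the real code also stops the Tx queue here */
+        q->dropped++;
+        return false;
+    }
+    q->len += skb_len;       /* really __skb_queue_tail() plus accounting */
+    return true;
+}
+
+int main(void)
+{
+    struct rx_queue_model q = { 0, 3000, 0 };
+    int i;
+
+    for (i = 0; i < 5; i++)
+        printf("packet %d: %s\n", i,
+               queue_tail(&q, 1500) ? "queued" : "dropped");
+    printf("dropped: %lu\n", q.dropped);
+    return 0;
+}
+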
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -189,11 +189,15 @@ void xenvif_rx_queue_tail(struct xenvif_
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+- __skb_queue_tail(&queue->rx_queue, skb);
+-
+- queue->rx_queue_len += skb->len;
+- if (queue->rx_queue_len > queue->rx_queue_max)
++ if (queue->rx_queue_len >= queue->rx_queue_max) {
+ netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
++ kfree_skb(skb);
++ queue->vif->dev->stats.rx_dropped++;
++ } else {
++ __skb_queue_tail(&queue->rx_queue, skb);
++
++ queue->rx_queue_len += skb->len;
++ }
+
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+ }
+@@ -243,6 +247,7 @@ static void xenvif_rx_queue_drop_expired
+ break;
+ xenvif_rx_dequeue(queue);
+ kfree_skb(skb);
++ queue->vif->dev->stats.rx_dropped++;
+ }
+ }
+
--- /dev/null
+From foo@baz Mon Dec 20 02:39:52 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 16 Dec 2021 08:24:08 +0100
+Subject: xen/netfront: harden netfront against event channel storms
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b27d47950e481f292c0a5ad57357edb9d95d03ba upstream.
+
+The Xen netfront driver is still vulnerable to an attack via an excessive
+number of events sent by the backend. Fix that by using lateeoi event
+channels.
+
+To be able to detect the case of no rx responses being added while the
+carrier is down, a new lock is needed in order to update and test
+rsp_cons and the number of unconsumed responses seen atomically.
+
+This is part of XSA-391
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 125 +++++++++++++++++++++++++++++++++------------
+ 1 file changed, 94 insertions(+), 31 deletions(-)
+
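+The rx_cons_lock bookkeeping introduced below can be modelled stand-alone
+as follows (hypothetical names, not the netfront structures): the consumer
+path records, under the lock, how many responses were still unconsumed
+after it advanced rsp_cons, and the interrupt path compares the current
+count against that record: more responses means real work arrived, fewer
+means the backend moved its producer index backwards and the device is
+declared broken.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+struct rx_model {
+    unsigned int rsp_prod;          /* backend producer index */
+    unsigned int rsp_cons;          /* frontend consumer index */
+    unsigned int rsp_unconsumed;    /* snapshot taken under rx_cons_lock */
+    bool broken;
+};
+
+/* consumer side: would run under rx_cons_lock */
+static void set_rsp_cons(struct rx_model *m, unsigned int val)
+{
+    m->rsp_cons = val;
+    m->rsp_unconsumed = m->rsp_prod - m->rsp_cons;
+}
+
+/* interrupt side: returns true if there is real (non-spurious) work */
+static bool handle_rx(struct rx_model *m)
+{
+    unsigned int queued = m->rsp_prod - m->rsp_cons;
+
+    if (queued > m->rsp_unconsumed) {
+        m->rsp_unconsumed = queued;
+        return true;                /* new responses arrived */
+    }
+    if (queued < m->rsp_unconsumed)
+        m->broken = true;           /* producer index went backwards */
+    return false;                   /* nothing new: spurious */
+}
+
+int main(void)
+{
+    struct rx_model m = { 0, 0, 0, false };
+
+    m.rsp_prod = 4;                 /* backend posts 4 responses */
+    printf("work %d broken %d\n", handle_rx(&m), m.broken);   /* 1 0 */
+    set_rsp_cons(&m, 1);            /* frontend consumes one response */
+    m.rsp_prod = 2;                 /* backend index goes backwards */
+    printf("work %d broken %d\n", handle_rx(&m), m.broken);   /* 0 1 */
+    return 0;
+}
+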
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -141,6 +141,9 @@ struct netfront_queue {
+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++
++ unsigned int rx_rsp_unconsumed;
++ spinlock_t rx_cons_lock;
+ };
+
+ struct netfront_info {
+@@ -365,11 +368,12 @@ static int xennet_open(struct net_device
+ return 0;
+ }
+
+-static void xennet_tx_buf_gc(struct netfront_queue *queue)
++static bool xennet_tx_buf_gc(struct netfront_queue *queue)
+ {
+ RING_IDX cons, prod;
+ unsigned short id;
+ struct sk_buff *skb;
++ bool work_done = false;
+ const struct device *dev = &queue->info->netdev->dev;
+
+ BUG_ON(!netif_carrier_ok(queue->info->netdev));
+@@ -386,6 +390,8 @@ static void xennet_tx_buf_gc(struct netf
+ for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
+ struct xen_netif_tx_response txrsp;
+
++ work_done = true;
++
+ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+ if (txrsp.status == XEN_NETIF_RSP_NULL)
+ continue;
+@@ -439,11 +445,13 @@ static void xennet_tx_buf_gc(struct netf
+
+ xennet_maybe_wake_tx(queue);
+
+- return;
++ return work_done;
+
+ err:
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
++
++ return work_done;
+ }
+
+ struct xennet_gnttab_make_txreq {
+@@ -748,6 +756,16 @@ static int xennet_close(struct net_devic
+ return 0;
+ }
+
++static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&queue->rx_cons_lock, flags);
++ queue->rx.rsp_cons = val;
++ queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
++}
++
+ static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
+ grant_ref_t ref)
+ {
+@@ -799,7 +817,7 @@ static int xennet_get_extras(struct netf
+ xennet_move_rx_slot(queue, skb, ref);
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+- queue->rx.rsp_cons = cons;
++ xennet_set_rx_rsp_cons(queue, cons);
+ return err;
+ }
+
+@@ -879,7 +897,7 @@ next:
+ }
+
+ if (unlikely(err))
+- queue->rx.rsp_cons = cons + slots;
++ xennet_set_rx_rsp_cons(queue, cons + slots);
+
+ return err;
+ }
+@@ -933,7 +951,8 @@ static int xennet_fill_frags(struct netf
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+- queue->rx.rsp_cons = ++cons + skb_queue_len(list);
++ xennet_set_rx_rsp_cons(queue,
++ ++cons + skb_queue_len(list));
+ kfree_skb(nskb);
+ return -ENOENT;
+ }
+@@ -946,7 +965,7 @@ static int xennet_fill_frags(struct netf
+ kfree_skb(nskb);
+ }
+
+- queue->rx.rsp_cons = cons;
++ xennet_set_rx_rsp_cons(queue, cons);
+
+ return 0;
+ }
+@@ -1067,7 +1086,9 @@ err:
+
+ if (unlikely(xennet_set_skb_gso(skb, gso))) {
+ __skb_queue_head(&tmpq, skb);
+- queue->rx.rsp_cons += skb_queue_len(&tmpq);
++ xennet_set_rx_rsp_cons(queue,
++ queue->rx.rsp_cons +
++ skb_queue_len(&tmpq));
+ goto err;
+ }
+ }
+@@ -1091,7 +1112,8 @@ err:
+
+ __skb_queue_tail(&rxq, skb);
+
+- i = ++queue->rx.rsp_cons;
++ i = queue->rx.rsp_cons + 1;
++ xennet_set_rx_rsp_cons(queue, i);
+ work_done++;
+ }
+
+@@ -1275,40 +1297,79 @@ static int xennet_set_features(struct ne
+ return 0;
+ }
+
+-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
++static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
+ {
+- struct netfront_queue *queue = dev_id;
+ unsigned long flags;
+
+- if (queue->info->broken)
+- return IRQ_HANDLED;
++ if (unlikely(queue->info->broken))
++ return false;
+
+ spin_lock_irqsave(&queue->tx_lock, flags);
+- xennet_tx_buf_gc(queue);
++ if (xennet_tx_buf_gc(queue))
++ *eoi = 0;
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
+
++ return true;
++}
++
++static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
++{
++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++ if (likely(xennet_handle_tx(dev_id, &eoiflag)))
++ xen_irq_lateeoi(irq, eoiflag);
++
+ return IRQ_HANDLED;
+ }
+
+-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
++static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
+ {
+- struct netfront_queue *queue = dev_id;
+- struct net_device *dev = queue->info->netdev;
++ unsigned int work_queued;
++ unsigned long flags;
++
++ if (unlikely(queue->info->broken))
++ return false;
+
+- if (queue->info->broken)
+- return IRQ_HANDLED;
++ spin_lock_irqsave(&queue->rx_cons_lock, flags);
++ work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
++ if (work_queued > queue->rx_rsp_unconsumed) {
++ queue->rx_rsp_unconsumed = work_queued;
++ *eoi = 0;
++ } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
++ const struct device *dev = &queue->info->netdev->dev;
++
++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
++ dev_alert(dev, "RX producer index going backwards\n");
++ dev_alert(dev, "Disabled for further use\n");
++ queue->info->broken = true;
++ return false;
++ }
++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+
+- if (likely(netif_carrier_ok(dev) &&
+- RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
++ if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
+ napi_schedule(&queue->napi);
+
++ return true;
++}
++
++static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
++{
++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++ if (likely(xennet_handle_rx(dev_id, &eoiflag)))
++ xen_irq_lateeoi(irq, eoiflag);
++
+ return IRQ_HANDLED;
+ }
+
+ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+ {
+- xennet_tx_interrupt(irq, dev_id);
+- xennet_rx_interrupt(irq, dev_id);
++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
++
++ if (xennet_handle_tx(dev_id, &eoiflag) &&
++ xennet_handle_rx(dev_id, &eoiflag))
++ xen_irq_lateeoi(irq, eoiflag);
++
+ return IRQ_HANDLED;
+ }
+
+@@ -1540,9 +1601,10 @@ static int setup_netfront_single(struct
+ if (err < 0)
+ goto fail;
+
+- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
+- xennet_interrupt,
+- 0, queue->info->netdev->name, queue);
++ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
++ xennet_interrupt, 0,
++ queue->info->netdev->name,
++ queue);
+ if (err < 0)
+ goto bind_fail;
+ queue->rx_evtchn = queue->tx_evtchn;
+@@ -1570,18 +1632,18 @@ static int setup_netfront_split(struct n
+
+ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ "%s-tx", queue->name);
+- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
+- xennet_tx_interrupt,
+- 0, queue->tx_irq_name, queue);
++ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
++ xennet_tx_interrupt, 0,
++ queue->tx_irq_name, queue);
+ if (err < 0)
+ goto bind_tx_fail;
+ queue->tx_irq = err;
+
+ snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ "%s-rx", queue->name);
+- err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
+- xennet_rx_interrupt,
+- 0, queue->rx_irq_name, queue);
++ err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
++ xennet_rx_interrupt, 0,
++ queue->rx_irq_name, queue);
+ if (err < 0)
+ goto bind_rx_fail;
+ queue->rx_irq = err;
+@@ -1683,6 +1745,7 @@ static int xennet_init_queue(struct netf
+
+ spin_lock_init(&queue->tx_lock);
+ spin_lock_init(&queue->rx_lock);
++ spin_lock_init(&queue->rx_cons_lock);
+
+ setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+ (unsigned long)queue);