xen/events: block rogue events for some time
author Juergen Gross <jgross@suse.com>
Tue, 3 Nov 2020 16:22:38 +0000 (17:22 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Nov 2020 17:25:03 +0000 (18:25 +0100)
commit 5f7f77400ab5b357b5fdb7122c3442239672186c upstream.

In order to avoid high dom0 load due to rogue guests sending events at
high frequency, block those events when no action was needed in dom0
to handle them.

This is done by adding a per-event counter, which is set to zero when
an EOI without XEN_EOI_FLAG_SPURIOUS is received from a backend
driver, and incremented when this flag has been set. When the counter
is 2 or higher, delay the EOI by 1 << (cnt - 2) jiffies, but by no
more than 1 second.
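
A minimal userspace sketch of this back-off (assuming HZ is 250 here
for illustration and using stand-in names rather than the kernel's
own) shows how the delay grows and saturates; backend drivers reach
this path by passing XEN_EOI_FLAG_SPURIOUS to xen_irq_lateeoi() for
events that required no work:

#include <stdio.h>

#define HZ 250	/* assumed for illustration; the real value is a kernel config */

static unsigned short spurious_cnt;	/* stand-in for info->spurious_cnt */

static unsigned int next_delay(int spurious)
{
	unsigned int delay = 0;

	if (spurious) {
		/* Stop growing the counter once 1 << cnt reaches HZ << 2. */
		if ((1 << spurious_cnt) < (HZ << 2))
			spurious_cnt++;
		if (spurious_cnt > 1) {
			delay = 1 << (spurious_cnt - 2);
			if (delay > HZ)
				delay = HZ;	/* cap at one second */
		}
	} else {
		spurious_cnt = 0;	/* a useful event resets the back-off */
	}

	return delay;
}

int main(void)
{
	/* A run of spurious events: delays 0, 1, 2, 4, ... jiffies, capped at HZ. */
	for (int i = 0; i < 12; i++)
		printf("event %2d: delay %u jiffies\n", i, next_delay(1));
	return 0;
}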

In order not to waste memory, shorten the per-event refcnt to two
bytes (it should normally never exceed a value of 2). Add an overflow
check to evtchn_get() to make sure the two bytes really won't
overflow.
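
A minimal sketch of the resulting guard (illustrative only, not the
kernel's evtchn_get() itself): once the two-byte refcnt reaches
SHRT_MAX, further gets fail with -EINVAL instead of wrapping negative.

#include <limits.h>
#include <errno.h>

static short refcnt = 1;	/* stand-in for info->refcnt */

static int get_ref(void)
{
	/* Refuse when unreferenced or when one more get would overflow. */
	if (refcnt <= 0 || refcnt == SHRT_MAX)
		return -EINVAL;
	refcnt++;
	return 0;
}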

This is part of XSA-332.

Cc: stable@vger.kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Wei Liu <wl@xen.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/xen/events/events_base.c
drivers/xen/events/events_internal.h

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 07cef5fb45152dd10ee8872eab567f1840cdea84..ec4074c66d9dbb22635e32003ae88a359d98cc71 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -468,17 +468,34 @@ static void lateeoi_list_add(struct irq_info *info)
        spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 }
 
-static void xen_irq_lateeoi_locked(struct irq_info *info)
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 {
        evtchn_port_t evtchn;
        unsigned int cpu;
+       unsigned int delay = 0;
 
        evtchn = info->evtchn;
        if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
                return;
 
+       if (spurious) {
+               if ((1 << info->spurious_cnt) < (HZ << 2))
+                       info->spurious_cnt++;
+               if (info->spurious_cnt > 1) {
+                       delay = 1 << (info->spurious_cnt - 2);
+                       if (delay > HZ)
+                               delay = HZ;
+                       if (!info->eoi_time)
+                               info->eoi_cpu = smp_processor_id();
+                       info->eoi_time = get_jiffies_64() + delay;
+               }
+       } else {
+               info->spurious_cnt = 0;
+       }
+
        cpu = info->eoi_cpu;
-       if (info->eoi_time && info->irq_epoch == per_cpu(irq_epoch, cpu)) {
+       if (info->eoi_time &&
+           (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
                lateeoi_list_add(info);
                return;
        }
@@ -515,7 +532,7 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
 
                info->eoi_time = 0;
 
-               xen_irq_lateeoi_locked(info);
+               xen_irq_lateeoi_locked(info, false);
        }
 
        if (info)
@@ -544,7 +561,7 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
        info = info_for_irq(irq);
 
        if (info)
-               xen_irq_lateeoi_locked(info);
+               xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
        read_unlock_irqrestore(&evtchn_rwlock, flags);
 }
@@ -1447,7 +1464,7 @@ int evtchn_get(unsigned int evtchn)
                goto done;
 
        err = -EINVAL;
-       if (info->refcnt <= 0)
+       if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
                goto done;
 
        info->refcnt++;
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 2cb9c2d2c5c0fc4b633b4a2a671aac09f67c4913..b9b4f59198930dbc185680ed2438c989ba85d770 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,8 @@ enum xen_irq_type {
 struct irq_info {
        struct list_head list;
        struct list_head eoi_list;
-       int refcnt;
+       short refcnt;
+       short spurious_cnt;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned int evtchn;    /* event channel */