git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Mar 2021 12:16:27 +0000 (13:16 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Mar 2021 12:16:27 +0000 (13:16 +0100)
added patches:
iio-imu-adis16400-fix-memory-leak.patch
iio-imu-adis16400-release-allocated-memory-on-failure.patch
xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch

queue-4.4/iio-imu-adis16400-fix-memory-leak.patch [new file with mode: 0644]
queue-4.4/iio-imu-adis16400-release-allocated-memory-on-failure.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch [new file with mode: 0644]
queue-4.4/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch [new file with mode: 0644]
queue-4.4/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch [new file with mode: 0644]

diff --git a/queue-4.4/iio-imu-adis16400-fix-memory-leak.patch b/queue-4.4/iio-imu-adis16400-fix-memory-leak.patch
new file mode 100644 (file)
index 0000000..1cb1953
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Mon Mar 15 01:10:28 PM CET 2021
+From: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Date: Sat, 13 Mar 2021 18:29:50 +0100
+Subject: iio: imu: adis16400: fix memory leak
+To: stable@vger.kernel.org
+Cc: Navid Emamdoost <navid.emamdoost@gmail.com>, Alexandru Ardelean <alexandru.ardelean@analog.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Message-ID: <20210313172950.6224-2-krzysztof.kozlowski@canonical.com>
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit 9c0530e898f384c5d279bfcebd8bb17af1105873 upstream.
+
+In adis_update_scan_mode_burst, if adis->buffer allocation fails release
+the adis->xfer.
+
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Reviewed-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[krzk: backport applied to adis16400_buffer.c instead of adis_buffer.c]
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/adis16400_buffer.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/imu/adis16400_buffer.c
++++ b/drivers/iio/imu/adis16400_buffer.c
+@@ -37,8 +37,11 @@ int adis16400_update_scan_mode(struct ii
+               return -ENOMEM;
+       adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+-      if (!adis->buffer)
++      if (!adis->buffer) {
++              kfree(adis->xfer);
++              adis->xfer = NULL;
+               return -ENOMEM;
++      }
+       tx = adis->buffer + burst_length;
+       tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
diff --git a/queue-4.4/iio-imu-adis16400-release-allocated-memory-on-failure.patch b/queue-4.4/iio-imu-adis16400-release-allocated-memory-on-failure.patch
new file mode 100644 (file)
index 0000000..cfa7d77
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Mon Mar 15 01:10:28 PM CET 2021
+From: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Date: Sat, 13 Mar 2021 18:29:49 +0100
+Subject: iio: imu: adis16400: release allocated memory on failure
+To: stable@vger.kernel.org
+Cc: Navid Emamdoost <navid.emamdoost@gmail.com>, Alexandru Ardelean <alexandru.ardelean@analog.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Message-ID: <20210313172950.6224-1-krzysztof.kozlowski@canonical.com>
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit ab612b1daf415b62c58e130cb3d0f30b255a14d0 upstream.
+
+In adis_update_scan_mode, if allocation for adis->buffer fails,
+previously allocated adis->xfer needs to be released.
+
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Reviewed-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/adis_buffer.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev
+               return -ENOMEM;
+       adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+-      if (!adis->buffer)
++      if (!adis->buffer) {
++              kfree(adis->xfer);
++              adis->xfer = NULL;
+               return -ENOMEM;
++      }
+       rx = adis->buffer;
+       tx = rx + scan_count;
index 34469143de44fa78ae07a272de350d0ac3b64a28..3f9b961dd84f8d515ce8b8e544605a7b56f3bc00 100644 (file)
@@ -68,3 +68,8 @@ alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch
 powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch
 media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch
 kvm-arm64-fix-exclusive-limit-for-ipa-size.patch
+iio-imu-adis16400-release-allocated-memory-on-failure.patch
+iio-imu-adis16400-fix-memory-leak.patch
+xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
+xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
+xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
diff --git a/queue-4.4/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch b/queue-4.4/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
new file mode 100644 (file)
index 0000000..82a7466
--- /dev/null
@@ -0,0 +1,126 @@
+From foo@baz Mon Mar 15 01:16:07 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 15 Mar 2021 10:06:31 +0100
+Subject: xen/events: avoid handling the same event on two cpus at the same time
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b6622798bc50b625a1e62f82c7190df40c1f5b21 upstream.
+
+When changing the cpu affinity of an event it can happen today that
+(with some unlucky timing) the same event will be handled on the old
+and the new cpu at the same time.
+
+Avoid that by adding an "event active" flag to the per-event data and
+call the handler only if this flag isn't set.
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall <julien@xen.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+Link: https://lore.kernel.org/r/20210306161833.4552-4-jgross@suse.com
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/events/events_base.c     |   25 +++++++++++++++++--------
+ drivers/xen/events/events_internal.h |    1 +
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -702,6 +702,12 @@ static void xen_evtchn_close(unsigned in
+               BUG();
+ }
++static void event_handler_exit(struct irq_info *info)
++{
++      smp_store_release(&info->is_active, 0);
++      clear_evtchn(info->evtchn);
++}
++
+ static void pirq_query_unmask(int irq)
+ {
+       struct physdev_irq_status_query irq_status;
+@@ -732,13 +738,13 @@ static void eoi_pirq(struct irq_data *da
+           likely(!irqd_irq_disabled(data))) {
+               do_mask(info, EVT_MASK_REASON_TEMPORARY);
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+               irq_move_masked_irq(data);
+               do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       } else
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+       if (pirq_needs_eoi(data->irq)) {
+               rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+@@ -1573,6 +1579,8 @@ void handle_irq_for_port(evtchn_port_t p
+       }
+       info = info_for_irq(irq);
++      if (xchg_acquire(&info->is_active, 1))
++              return;
+       if (ctrl->defer_eoi) {
+               info->eoi_cpu = smp_processor_id();
+@@ -1749,13 +1757,13 @@ static void ack_dynirq(struct irq_data *
+           likely(!irqd_irq_disabled(data))) {
+               do_mask(info, EVT_MASK_REASON_TEMPORARY);
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+               irq_move_masked_irq(data);
+               do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       } else
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+ }
+ static void mask_ack_dynirq(struct irq_data *data)
+@@ -1771,7 +1779,7 @@ static void lateeoi_ack_dynirq(struct ir
+       if (VALID_EVTCHN(evtchn)) {
+               do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+       }
+ }
+@@ -1782,7 +1790,7 @@ static void lateeoi_mask_ack_dynirq(stru
+       if (VALID_EVTCHN(evtchn)) {
+               do_mask(info, EVT_MASK_REASON_EXPLICIT);
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+       }
+ }
+@@ -1891,10 +1899,11 @@ static void restore_cpu_ipis(unsigned in
+ /* Clear an irq's pending state, in preparation for polling on it */
+ void xen_clear_irq_pending(int irq)
+ {
+-      int evtchn = evtchn_from_irq(irq);
++      struct irq_info *info = info_for_irq(irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
+       if (VALID_EVTCHN(evtchn))
+-              clear_evtchn(evtchn);
++              event_handler_exit(info);
+ }
+ EXPORT_SYMBOL(xen_clear_irq_pending);
+ void xen_set_irq_pending(int irq)
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -40,6 +40,7 @@ struct irq_info {
+ #define EVT_MASK_REASON_EXPLICIT      0x01
+ #define EVT_MASK_REASON_TEMPORARY     0x02
+ #define EVT_MASK_REASON_EOI_PENDING   0x04
++      u8 is_active;           /* Is event just being handled? */
+       unsigned irq;
+       unsigned int evtchn;    /* event channel */
+       unsigned short cpu;     /* cpu bound */
diff --git a/queue-4.4/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch b/queue-4.4/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
new file mode 100644 (file)
index 0000000..c5c34b9
--- /dev/null
@@ -0,0 +1,368 @@
+From foo@baz Mon Mar 15 01:16:07 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 15 Mar 2021 09:55:36 +0100
+Subject: xen/events: don't unmask an event channel when an eoi is pending
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 25da4618af240fbec6112401498301a6f2bc9702 upstream.
+
+An event channel should be kept masked when an eoi is pending for it.
+When being migrated to another cpu it might be unmasked, though.
+
+In order to avoid this keep three different flags for each event channel
+to be able to distinguish "normal" masking/unmasking from eoi related
+masking/unmasking and temporary masking. The event channel should only
+be able to generate an interrupt if all flags are cleared.
+
+Cc: stable@vger.kernel.org
+Fixes: 54c9de89895e ("xen/events: add a new "late EOI" evtchn framework")
+Reported-by: Julien Grall <julien@xen.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Tested-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Link: https://lore.kernel.org/r/20210306161833.4552-3-jgross@suse.com
+
+[boris -- corrected Fixed tag format]
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/events/events_2l.c       |    7 --
+ drivers/xen/events/events_base.c     |  108 ++++++++++++++++++++++++++---------
+ drivers/xen/events/events_fifo.c     |    7 --
+ drivers/xen/events/events_internal.h |   13 +---
+ 4 files changed, 87 insertions(+), 48 deletions(-)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -75,12 +75,6 @@ static bool evtchn_2l_is_pending(unsigne
+       return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ }
+-static bool evtchn_2l_test_and_set_mask(unsigned port)
+-{
+-      struct shared_info *s = HYPERVISOR_shared_info;
+-      return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+-}
+-
+ static void evtchn_2l_mask(unsigned port)
+ {
+       struct shared_info *s = HYPERVISOR_shared_info;
+@@ -374,7 +368,6 @@ static const struct evtchn_ops evtchn_op
+       .clear_pending     = evtchn_2l_clear_pending,
+       .set_pending       = evtchn_2l_set_pending,
+       .is_pending        = evtchn_2l_is_pending,
+-      .test_and_set_mask = evtchn_2l_test_and_set_mask,
+       .mask              = evtchn_2l_mask,
+       .unmask            = evtchn_2l_unmask,
+       .handle_events     = evtchn_2l_handle_events,
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -99,6 +99,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
+  *   evtchn_rwlock
+  *     IRQ-desc lock
+  *       percpu eoi_list_lock
++ *         irq_info->lock
+  */
+ static LIST_HEAD(xen_irq_list_head);
+@@ -220,6 +221,8 @@ static int xen_irq_info_common_setup(str
+       info->irq = irq;
+       info->evtchn = evtchn;
+       info->cpu = cpu;
++      info->mask_reason = EVT_MASK_REASON_EXPLICIT;
++      spin_lock_init(&info->lock);
+       ret = set_evtchn_to_irq(evtchn, irq);
+       if (ret < 0)
+@@ -367,6 +370,34 @@ unsigned int cpu_from_evtchn(unsigned in
+       return ret;
+ }
++static void do_mask(struct irq_info *info, u8 reason)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&info->lock, flags);
++
++      if (!info->mask_reason)
++              mask_evtchn(info->evtchn);
++
++      info->mask_reason |= reason;
++
++      spin_unlock_irqrestore(&info->lock, flags);
++}
++
++static void do_unmask(struct irq_info *info, u8 reason)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&info->lock, flags);
++
++      info->mask_reason &= ~reason;
++
++      if (!info->mask_reason)
++              unmask_evtchn(info->evtchn);
++
++      spin_unlock_irqrestore(&info->lock, flags);
++}
++
+ #ifdef CONFIG_X86
+ static bool pirq_check_eoi_map(unsigned irq)
+ {
+@@ -502,7 +533,7 @@ static void xen_irq_lateeoi_locked(struc
+       }
+       info->eoi_time = 0;
+-      unmask_evtchn(evtchn);
++      do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+ static void xen_irq_lateeoi_worker(struct work_struct *work)
+@@ -689,7 +720,8 @@ static void pirq_query_unmask(int irq)
+ static void eoi_pirq(struct irq_data *data)
+ {
+-      int evtchn = evtchn_from_irq(data->irq);
++      struct irq_info *info = info_for_irq(data->irq);
++      int evtchn = info ? info->evtchn : 0;
+       struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+       int rc = 0;
+@@ -698,14 +730,13 @@ static void eoi_pirq(struct irq_data *da
+       if (unlikely(irqd_is_setaffinity_pending(data)) &&
+           likely(!irqd_irq_disabled(data))) {
+-              int masked = test_and_set_mask(evtchn);
++              do_mask(info, EVT_MASK_REASON_TEMPORARY);
+               clear_evtchn(evtchn);
+               irq_move_masked_irq(data);
+-              if (!masked)
+-                      unmask_evtchn(evtchn);
++              do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       } else
+               clear_evtchn(evtchn);
+@@ -758,7 +789,8 @@ static unsigned int __startup_pirq(unsig
+               goto err;
+ out:
+-      unmask_evtchn(evtchn);
++      do_unmask(info, EVT_MASK_REASON_EXPLICIT);
++
+       eoi_pirq(irq_get_irq_data(irq));
+       return 0;
+@@ -785,7 +817,7 @@ static void shutdown_pirq(struct irq_dat
+       if (!VALID_EVTCHN(evtchn))
+               return;
+-      mask_evtchn(evtchn);
++      do_mask(info, EVT_MASK_REASON_EXPLICIT);
+       xen_evtchn_close(evtchn);
+       xen_irq_info_cleanup(info);
+ }
+@@ -1647,8 +1679,8 @@ void rebind_evtchn_irq(int evtchn, int i
+ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ {
+       struct evtchn_bind_vcpu bind_vcpu;
+-      int evtchn = evtchn_from_irq(irq);
+-      int masked;
++      struct irq_info *info = info_for_irq(irq);
++      int evtchn = info ? info->evtchn : 0;
+       if (!VALID_EVTCHN(evtchn))
+               return -1;
+@@ -1664,7 +1696,7 @@ static int rebind_irq_to_cpu(unsigned ir
+        * Mask the event while changing the VCPU binding to prevent
+        * it being delivered on an unexpected VCPU.
+        */
+-      masked = test_and_set_mask(evtchn);
++      do_mask(info, EVT_MASK_REASON_TEMPORARY);
+       /*
+        * If this fails, it usually just indicates that we're dealing with a
+@@ -1674,8 +1706,7 @@ static int rebind_irq_to_cpu(unsigned ir
+       if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+               bind_evtchn_to_cpu(evtchn, tcpu);
+-      if (!masked)
+-              unmask_evtchn(evtchn);
++      do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       return 0;
+ }
+@@ -1690,37 +1721,39 @@ static int set_affinity_irq(struct irq_d
+ static void enable_dynirq(struct irq_data *data)
+ {
+-      int evtchn = evtchn_from_irq(data->irq);
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
+       if (VALID_EVTCHN(evtchn))
+-              unmask_evtchn(evtchn);
++              do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ static void disable_dynirq(struct irq_data *data)
+ {
+-      int evtchn = evtchn_from_irq(data->irq);
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
+       if (VALID_EVTCHN(evtchn))
+-              mask_evtchn(evtchn);
++              do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ static void ack_dynirq(struct irq_data *data)
+ {
+-      int evtchn = evtchn_from_irq(data->irq);
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       if (unlikely(irqd_is_setaffinity_pending(data)) &&
+           likely(!irqd_irq_disabled(data))) {
+-              int masked = test_and_set_mask(evtchn);
++              do_mask(info, EVT_MASK_REASON_TEMPORARY);
+               clear_evtchn(evtchn);
+               irq_move_masked_irq(data);
+-              if (!masked)
+-                      unmask_evtchn(evtchn);
++              do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       } else
+               clear_evtchn(evtchn);
+ }
+@@ -1731,18 +1764,39 @@ static void mask_ack_dynirq(struct irq_d
+       ack_dynirq(data);
+ }
++static void lateeoi_ack_dynirq(struct irq_data *data)
++{
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++      if (VALID_EVTCHN(evtchn)) {
++              do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++              clear_evtchn(evtchn);
++      }
++}
++
++static void lateeoi_mask_ack_dynirq(struct irq_data *data)
++{
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++      if (VALID_EVTCHN(evtchn)) {
++              do_mask(info, EVT_MASK_REASON_EXPLICIT);
++              clear_evtchn(evtchn);
++      }
++}
++
+ static int retrigger_dynirq(struct irq_data *data)
+ {
+-      unsigned int evtchn = evtchn_from_irq(data->irq);
+-      int masked;
++      struct irq_info *info = info_for_irq(data->irq);
++      evtchn_port_t evtchn = info ? info->evtchn : 0;
+       if (!VALID_EVTCHN(evtchn))
+               return 0;
+-      masked = test_and_set_mask(evtchn);
++      do_mask(info, EVT_MASK_REASON_TEMPORARY);
+       set_evtchn(evtchn);
+-      if (!masked)
+-              unmask_evtchn(evtchn);
++      do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+       return 1;
+ }
+@@ -1949,8 +2003,8 @@ static struct irq_chip xen_lateeoi_chip
+       .irq_mask               = disable_dynirq,
+       .irq_unmask             = enable_dynirq,
+-      .irq_ack                = mask_ack_dynirq,
+-      .irq_mask_ack           = mask_ack_dynirq,
++      .irq_ack                = lateeoi_ack_dynirq,
++      .irq_mask_ack           = lateeoi_mask_ack_dynirq,
+       .irq_set_affinity       = set_affinity_irq,
+       .irq_retrigger          = retrigger_dynirq,
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsig
+       return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
+ }
+-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+-{
+-      event_word_t *word = event_word_from_port(port);
+-      return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+-}
+-
+ static void evtchn_fifo_mask(unsigned port)
+ {
+       event_word_t *word = event_word_from_port(port);
+@@ -421,7 +415,6 @@ static const struct evtchn_ops evtchn_op
+       .clear_pending     = evtchn_fifo_clear_pending,
+       .set_pending       = evtchn_fifo_set_pending,
+       .is_pending        = evtchn_fifo_is_pending,
+-      .test_and_set_mask = evtchn_fifo_test_and_set_mask,
+       .mask              = evtchn_fifo_mask,
+       .unmask            = evtchn_fifo_unmask,
+       .handle_events     = evtchn_fifo_handle_events,
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -35,13 +35,18 @@ struct irq_info {
+       struct list_head eoi_list;
+       short refcnt;
+       short spurious_cnt;
+-      enum xen_irq_type type; /* type */
++      short type;             /* type */
++      u8 mask_reason;         /* Why is event channel masked */
++#define EVT_MASK_REASON_EXPLICIT      0x01
++#define EVT_MASK_REASON_TEMPORARY     0x02
++#define EVT_MASK_REASON_EOI_PENDING   0x04
+       unsigned irq;
+       unsigned int evtchn;    /* event channel */
+       unsigned short cpu;     /* cpu bound */
+       unsigned short eoi_cpu; /* EOI must happen on this cpu */
+       unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+       u64 eoi_time;           /* Time in jiffies when to EOI. */
++      spinlock_t lock;
+       union {
+               unsigned short virq;
+@@ -73,7 +78,6 @@ struct evtchn_ops {
+       void (*clear_pending)(unsigned port);
+       void (*set_pending)(unsigned port);
+       bool (*is_pending)(unsigned port);
+-      bool (*test_and_set_mask)(unsigned port);
+       void (*mask)(unsigned port);
+       void (*unmask)(unsigned port);
+@@ -138,11 +142,6 @@ static inline bool test_evtchn(unsigned
+       return evtchn_ops->is_pending(port);
+ }
+-static inline bool test_and_set_mask(unsigned port)
+-{
+-      return evtchn_ops->test_and_set_mask(port);
+-}
+-
+ static inline void mask_evtchn(unsigned port)
+ {
+       return evtchn_ops->mask(port);
diff --git a/queue-4.4/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch b/queue-4.4/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
new file mode 100644 (file)
index 0000000..1e4c40e
--- /dev/null
@@ -0,0 +1,108 @@
+From foo@baz Mon Mar 15 01:16:07 PM CET 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 15 Mar 2021 09:54:02 +0100
+Subject: xen/events: reset affinity of 2-level event when tearing it down
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 9e77d96b8e2724ed00380189f7b0ded61113b39f upstream.
+
+When creating a new event channel with 2-level events the affinity
+needs to be reset initially in order to avoid using an old affinity
+from earlier usage of the event channel port. So when tearing an event
+channel down reset all affinity bits.
+
+The same applies to the affinity when onlining a vcpu: all old
+affinity settings for this vcpu must be reset. As percpu events get
+initialized before the percpu event channel hook is called,
+resetting of the affinities happens after offlining a vcpu (this is
+working, as initial percpu memory is zeroed out).
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall <julien@xen.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+Link: https://lore.kernel.org/r/20210306161833.4552-2-jgross@suse.com
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/events/events_2l.c       |   15 +++++++++++++++
+ drivers/xen/events/events_base.c     |    1 +
+ drivers/xen/events/events_internal.h |    8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -46,6 +46,11 @@ static unsigned evtchn_2l_max_channels(v
+       return EVTCHN_2L_NR_CHANNELS;
+ }
++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
++{
++      clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
++}
++
+ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+ {
+       clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+@@ -353,9 +358,18 @@ static void evtchn_2l_resume(void)
+                               EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+ }
++static int evtchn_2l_percpu_deinit(unsigned int cpu)
++{
++      memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
++                      EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
++
++      return 0;
++}
++
+ static const struct evtchn_ops evtchn_ops_2l = {
+       .max_channels      = evtchn_2l_max_channels,
+       .nr_channels       = evtchn_2l_max_channels,
++      .remove            = evtchn_2l_remove,
+       .bind_to_cpu       = evtchn_2l_bind_to_cpu,
+       .clear_pending     = evtchn_2l_clear_pending,
+       .set_pending       = evtchn_2l_set_pending,
+@@ -365,6 +379,7 @@ static const struct evtchn_ops evtchn_op
+       .unmask            = evtchn_2l_unmask,
+       .handle_events     = evtchn_2l_handle_events,
+       .resume            = evtchn_2l_resume,
++      .percpu_deinit     = evtchn_2l_percpu_deinit,
+ };
+ void __init xen_evtchn_2l_init(void)
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -286,6 +286,7 @@ static int xen_irq_info_pirq_setup(unsig
+ static void xen_irq_info_cleanup(struct irq_info *info)
+ {
+       set_evtchn_to_irq(info->evtchn, -1);
++      xen_evtchn_port_remove(info->evtchn, info->cpu);
+       info->evtchn = 0;
+ }
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -67,6 +67,7 @@ struct evtchn_ops {
+       unsigned (*nr_channels)(void);
+       int (*setup)(struct irq_info *info);
++      void (*remove)(evtchn_port_t port, unsigned int cpu);
+       void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+       void (*clear_pending)(unsigned port);
+@@ -109,6 +110,13 @@ static inline int xen_evtchn_port_setup(
+       return 0;
+ }
++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
++                                        unsigned int cpu)
++{
++      if (evtchn_ops->remove)
++              evtchn_ops->remove(evtchn, cpu);
++}
++
+ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+                                              unsigned cpu)
+ {