From a27b24767f1993c288614c7fc453ad2e1700e1ad Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 15 Mar 2021 13:17:02 +0100
Subject: [PATCH] 4.14-stable patches

added patches:
	iio-imu-adis16400-release-allocated-memory-on-failure.patch
	xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
	xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
	xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
---
 ...-release-allocated-memory-on-failure.patch |  39 ++
 queue-4.14/series                             |   4 +
 ...e-event-on-two-cpus-at-the-same-time.patch | 126 ++++++
 ...event-channel-when-an-eoi-is-pending.patch | 379 ++++++++++++++++++
 ...f-2-level-event-when-tearing-it-down.patch | 108 +++++
 5 files changed, 656 insertions(+)
 create mode 100644 queue-4.14/iio-imu-adis16400-release-allocated-memory-on-failure.patch
 create mode 100644 queue-4.14/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
 create mode 100644 queue-4.14/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
 create mode 100644 queue-4.14/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch

diff --git a/queue-4.14/iio-imu-adis16400-release-allocated-memory-on-failure.patch b/queue-4.14/iio-imu-adis16400-release-allocated-memory-on-failure.patch
new file mode 100644
index 00000000000..190c7fe0c2c
--- /dev/null
+++ b/queue-4.14/iio-imu-adis16400-release-allocated-memory-on-failure.patch
@@ -0,0 +1,39 @@
+From foo@baz Mon Mar 15 01:10:05 PM CET 2021
+From: Krzysztof Kozlowski
+Date: Sat, 13 Mar 2021 18:29:49 +0100
+Subject: iio: imu: adis16400: release allocated memory on failure
+To: stable@vger.kernel.org
+Cc: Navid Emamdoost , Alexandru Ardelean , Jonathan Cameron , Krzysztof Kozlowski
+Message-ID: <20210313172950.6224-1-krzysztof.kozlowski@canonical.com>
+
+From: Navid Emamdoost
+
+commit ab612b1daf415b62c58e130cb3d0f30b255a14d0 upstream.
+
+In adis_update_scan_mode, if allocation for adis->buffer fails,
+previously allocated adis->xfer needs to be released.
+
+Signed-off-by: Navid Emamdoost
+Reviewed-by: Alexandru Ardelean
+Signed-off-by: Jonathan Cameron
+Signed-off-by: Krzysztof Kozlowski
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/iio/imu/adis_buffer.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev
+ 		return -ENOMEM;
+ 
+ 	adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+-	if (!adis->buffer)
++	if (!adis->buffer) {
++		kfree(adis->xfer);
++		adis->xfer = NULL;
+ 		return -ENOMEM;
++	}
+ 
+ 	rx = adis->buffer;
+ 	tx = rx + scan_count;
diff --git a/queue-4.14/series b/queue-4.14/series
index 0f39fc8e1a2..1003a174f9b 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -89,3 +89,7 @@ powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch
 binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch
 hwmon-lm90-fix-max6658-sporadic-wrong-temperature-reading.patch
 kvm-arm64-fix-exclusive-limit-for-ipa-size.patch
+iio-imu-adis16400-release-allocated-memory-on-failure.patch
+xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
+xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
+xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
diff --git a/queue-4.14/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch b/queue-4.14/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
new file mode 100644
index 00000000000..5ab0c0e1589
--- /dev/null
+++ b/queue-4.14/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
@@ -0,0 +1,126 @@
+From foo@baz Mon Mar 15 01:15:16 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 09:22:38 +0100
+Subject: xen/events: avoid handling the same event on two cpus at the same time
+
+From: Juergen Gross
+
+commit b6622798bc50b625a1e62f82c7190df40c1f5b21 upstream.
+
+When changing the cpu affinity of an event it can happen today that
+(with some unlucky timing) the same event will be handled on the old
+and the new cpu at the same time.
+
+Avoid that by adding an "event active" flag to the per-event data and
+call the handler only if this flag isn't set.
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Link: https://lore.kernel.org/r/20210306161833.4552-4-jgross@suse.com
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_base.c     |   25 +++++++++++++++++--------
+ drivers/xen/events/events_internal.h |    1 +
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -693,6 +693,12 @@ static void xen_evtchn_close(unsigned in
+ 		BUG();
+ }
+ 
++static void event_handler_exit(struct irq_info *info)
++{
++	smp_store_release(&info->is_active, 0);
++	clear_evtchn(info->evtchn);
++}
++
+ static void pirq_query_unmask(int irq)
+ {
+ 	struct physdev_irq_status_query irq_status;
+@@ -723,13 +729,13 @@ static void eoi_pirq(struct irq_data *da
+ 	    likely(!irqd_irq_disabled(data))) {
+ 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 		irq_move_masked_irq(data);
+ 
+ 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+ 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+@@ -1565,6 +1571,8 @@ void handle_irq_for_port(evtchn_port_t p
+ 	}
+ 
+ 	info = info_for_irq(irq);
++	if (xchg_acquire(&info->is_active, 1))
++		return;
+ 
+ 	if (ctrl->defer_eoi) {
+ 		info->eoi_cpu = smp_processor_id();
+@@ -1752,13 +1760,13 @@ static void ack_dynirq(struct irq_data *
+ 	    likely(!irqd_irq_disabled(data))) {
+ 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 		irq_move_masked_irq(data);
+ 
+ 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ }
+ 
+ static void mask_ack_dynirq(struct irq_data *data)
+@@ -1774,7 +1782,7 @@ static void lateeoi_ack_dynirq(struct ir
+ 
+ 	if (VALID_EVTCHN(evtchn)) {
+ 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 	}
+ }
+ 
+@@ -1785,7 +1793,7 @@ static void lateeoi_mask_ack_dynirq(stru
+ 
+ 	if (VALID_EVTCHN(evtchn)) {
+ 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 	}
+ }
+ 
+@@ -1894,10 +1902,11 @@ static void restore_cpu_ipis(unsigned in
+ /* Clear an irq's pending state, in preparation for polling on it */
+ void xen_clear_irq_pending(int irq)
+ {
+-	int evtchn = evtchn_from_irq(irq);
++	struct irq_info *info = info_for_irq(irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ }
+ EXPORT_SYMBOL(xen_clear_irq_pending);
+ void xen_set_irq_pending(int irq)
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -40,6 +40,7 @@ struct irq_info {
+ #define EVT_MASK_REASON_EXPLICIT	0x01
+ #define EVT_MASK_REASON_TEMPORARY	0x02
+ #define EVT_MASK_REASON_EOI_PENDING	0x04
++	u8 is_active;		/* Is event just being handled? */
+ 	unsigned irq;
+ 	unsigned int evtchn;	/* event channel */
+ 	unsigned short cpu;	/* cpu bound */
diff --git a/queue-4.14/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch b/queue-4.14/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
new file mode 100644
index 00000000000..db7a8370fd5
--- /dev/null
+++ b/queue-4.14/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
@@ -0,0 +1,379 @@
+From foo@baz Mon Mar 15 01:15:16 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 09:22:37 +0100
+Subject: xen/events: don't unmask an event channel when an eoi is pending
+
+From: Juergen Gross
+
+commit 25da4618af240fbec6112401498301a6f2bc9702 upstream.
+
+An event channel should be kept masked when an eoi is pending for it.
+When being migrated to another cpu it might be unmasked, though.
+
+In order to avoid this keep three different flags for each event channel
+to be able to distinguish "normal" masking/unmasking from eoi related
+masking/unmasking and temporary masking. The event channel should only
+be able to generate an interrupt if all flags are cleared.
+
+Cc: stable@vger.kernel.org
+Fixes: 54c9de89895e ("xen/events: add a new "late EOI" evtchn framework")
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Reviewed-by: Boris Ostrovsky
+Tested-by: Ross Lagerwall
+Link: https://lore.kernel.org/r/20210306161833.4552-3-jgross@suse.com
+
+[boris -- corrected Fixed tag format]
+
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_2l.c       |    7 --
+ drivers/xen/events/events_base.c     |  110 ++++++++++++++++++++++++++---------
+ drivers/xen/events/events_fifo.c     |    7 --
+ drivers/xen/events/events_internal.h |   13 +---
+ 4 files changed, 88 insertions(+), 49 deletions(-)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -76,12 +76,6 @@ static bool evtchn_2l_is_pending(unsigne
+ 	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ }
+ 
+-static bool evtchn_2l_test_and_set_mask(unsigned port)
+-{
+-	struct shared_info *s = HYPERVISOR_shared_info;
+-	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+-}
+-
+ static void evtchn_2l_mask(unsigned port)
+ {
+ 	struct shared_info *s = HYPERVISOR_shared_info;
+@@ -375,7 +369,6 @@ static const struct evtchn_ops evtchn_op
+ 	.clear_pending     = evtchn_2l_clear_pending,
+ 	.set_pending       = evtchn_2l_set_pending,
+ 	.is_pending        = evtchn_2l_is_pending,
+-	.test_and_set_mask = evtchn_2l_test_and_set_mask,
+ 	.mask              = evtchn_2l_mask,
+ 	.unmask            = evtchn_2l_unmask,
+ 	.handle_events     = evtchn_2l_handle_events,
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -98,6 +98,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
+  *   evtchn_rwlock
+  *     IRQ-desc lock
+  *       percpu eoi_list_lock
++ *         irq_info->lock
+  */
+ 
+ static LIST_HEAD(xen_irq_list_head);
+@@ -219,6 +220,8 @@ static int xen_irq_info_common_setup(str
+ 	info->irq = irq;
+ 	info->evtchn = evtchn;
+ 	info->cpu = cpu;
++	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
++	spin_lock_init(&info->lock);
+ 
+ 	ret = set_evtchn_to_irq(evtchn, irq);
+ 	if (ret < 0)
+@@ -366,6 +369,34 @@ unsigned int cpu_from_evtchn(unsigned in
+ 	return ret;
+ }
+ 
++static void do_mask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	if (!info->mask_reason)
++		mask_evtchn(info->evtchn);
++
++	info->mask_reason |= reason;
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
++static void do_unmask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	info->mask_reason &= ~reason;
++
++	if (!info->mask_reason)
++		unmask_evtchn(info->evtchn);
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
+ #ifdef CONFIG_X86
+ static bool pirq_check_eoi_map(unsigned irq)
+ {
+@@ -493,7 +524,7 @@ static void xen_irq_lateeoi_locked(struc
+ 	}
+ 
+ 	info->eoi_time = 0;
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+ 
+ static void xen_irq_lateeoi_worker(struct work_struct *work)
+@@ -680,7 +711,8 @@ static void pirq_query_unmask(int irq)
+ 
+ static void eoi_pirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	int evtchn = info ? info->evtchn : 0;
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+@@ -689,14 +721,13 @@ static void eoi_pirq(struct irq_data *da
+ 
+ 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ 	    likely(!irqd_irq_disabled(data))) {
+-		int masked = test_and_set_mask(evtchn);
++		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 		clear_evtchn(evtchn);
+ 
+ 		irq_move_masked_irq(data);
+ 
+-		if (!masked)
+-			unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+ 		clear_evtchn(evtchn);
+ 
+@@ -749,7 +780,8 @@ static unsigned int __startup_pirq(unsig
+ 		goto err;
+ 
+ out:
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
++
+ 	eoi_pirq(irq_get_irq_data(irq));
+ 
+ 	return 0;
+@@ -776,7 +808,7 @@ static void shutdown_pirq(struct irq_dat
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	mask_evtchn(evtchn);
++	do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ 	xen_evtchn_close(evtchn);
+ 	xen_irq_info_cleanup(info);
+ }
+@@ -1635,10 +1667,10 @@ void rebind_evtchn_irq(int evtchn, int i
+ }
+ 
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
++static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ {
+ 	struct evtchn_bind_vcpu bind_vcpu;
+-	int masked;
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return -1;
+@@ -1654,7 +1686,7 @@ static int xen_rebind_evtchn_to_cpu(int
+ 	 * Mask the event while changing the VCPU binding to prevent
+ 	 * it being delivered on an unexpected VCPU.
+ 	 */
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	/*
+ 	 * If this fails, it usually just indicates that we're dealing with a
+@@ -1664,8 +1696,7 @@ static int xen_rebind_evtchn_to_cpu(int
+ 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ 		bind_evtchn_to_cpu(evtchn, tcpu);
+ 
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 0;
+ }
+@@ -1674,7 +1705,7 @@ static int set_affinity_irq(struct irq_d
+ 			    bool force)
+ {
+ 	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
+-	int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
++	int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
+ 
+ 	if (!ret)
+ 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
+@@ -1693,37 +1724,39 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtch
+ 
+ static void enable_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void disable_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		mask_evtchn(evtchn);
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void ack_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+ 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ 	    likely(!irqd_irq_disabled(data))) {
+-		int masked = test_and_set_mask(evtchn);
++		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 		clear_evtchn(evtchn);
+ 
+ 		irq_move_masked_irq(data);
+ 
+-		if (!masked)
+-			unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+ 		clear_evtchn(evtchn);
+ }
+@@ -1734,18 +1767,39 @@ static void mask_ack_dynirq(struct irq_d
+ 	ack_dynirq(data);
+ }
+ 
++static void lateeoi_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++		clear_evtchn(evtchn);
++	}
++}
++
++static void lateeoi_mask_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
++		clear_evtchn(evtchn);
++	}
++}
++
+ static int retrigger_dynirq(struct irq_data *data)
+ {
+-	unsigned int evtchn = evtchn_from_irq(data->irq);
+-	int masked;
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return 0;
+ 
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 	set_evtchn(evtchn);
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 1;
+ }
+@@ -1951,8 +2005,8 @@ static struct irq_chip xen_lateeoi_chip
+ 	.irq_mask		= disable_dynirq,
+ 	.irq_unmask		= enable_dynirq,
+ 
+-	.irq_ack		= mask_ack_dynirq,
+-	.irq_mask_ack		= mask_ack_dynirq,
++	.irq_ack		= lateeoi_ack_dynirq,
++	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
+ 
+ 	.irq_set_affinity	= set_affinity_irq,
+ 	.irq_retrigger		= retrigger_dynirq,
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsig
+ 	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
+ }
+ 
+-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+-{
+-	event_word_t *word = event_word_from_port(port);
+-	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+-}
+-
+ static void evtchn_fifo_mask(unsigned port)
+ {
+ 	event_word_t *word = event_word_from_port(port);
+@@ -420,7 +414,6 @@ static const struct evtchn_ops evtchn_op
+ 	.clear_pending     = evtchn_fifo_clear_pending,
+ 	.set_pending       = evtchn_fifo_set_pending,
+ 	.is_pending        = evtchn_fifo_is_pending,
+-	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ 	.mask              = evtchn_fifo_mask,
+ 	.unmask            = evtchn_fifo_unmask,
+ 	.handle_events     = evtchn_fifo_handle_events,
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -35,13 +35,18 @@ struct irq_info {
+ 	struct list_head eoi_list;
+ 	short refcnt;
+ 	short spurious_cnt;
+-	enum xen_irq_type type; /* type */
++	short type;		/* type */
++	u8 mask_reason;		/* Why is event channel masked */
++#define EVT_MASK_REASON_EXPLICIT	0x01
++#define EVT_MASK_REASON_TEMPORARY	0x02
++#define EVT_MASK_REASON_EOI_PENDING	0x04
+ 	unsigned irq;
+ 	unsigned int evtchn;	/* event channel */
+ 	unsigned short cpu;	/* cpu bound */
+ 	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
+ 	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
+ 	u64 eoi_time;		/* Time in jiffies when to EOI. */
++	spinlock_t lock;
+ 
+ 	union {
+ 		unsigned short virq;
+@@ -73,7 +78,6 @@ struct evtchn_ops {
+ 	void (*clear_pending)(unsigned port);
+ 	void (*set_pending)(unsigned port);
+ 	bool (*is_pending)(unsigned port);
+-	bool (*test_and_set_mask)(unsigned port);
+ 	void (*mask)(unsigned port);
+ 	void (*unmask)(unsigned port);
+ 
+@@ -138,11 +142,6 @@ static inline bool test_evtchn(unsigned
+ 	return evtchn_ops->is_pending(port);
+ }
+ 
+-static inline bool test_and_set_mask(unsigned port)
+-{
+-	return evtchn_ops->test_and_set_mask(port);
+-}
+-
+ static inline void mask_evtchn(unsigned port)
+ {
+ 	return evtchn_ops->mask(port);
diff --git a/queue-4.14/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch b/queue-4.14/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
new file mode 100644
index 00000000000..2054b26151c
--- /dev/null
+++ b/queue-4.14/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
@@ -0,0 +1,108 @@
+From foo@baz Mon Mar 15 01:15:16 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 09:22:37 +0100
+Subject: xen/events: reset affinity of 2-level event when tearing it down
+
+From: Juergen Gross
+
+commit 9e77d96b8e2724ed00380189f7b0ded61113b39f upstream.
+
+When creating a new event channel with 2-level events the affinity
+needs to be reset initially in order to avoid using an old affinity
+from earlier usage of the event channel port. So when tearing an event
+channel down reset all affinity bits.
+
+The same applies to the affinity when onlining a vcpu: all old
+affinity settings for this vcpu must be reset. As percpu events get
+initialized before the percpu event channel hook is called,
+resetting of the affinities happens after offlining a vcpu (this is
+working, as initial percpu memory is zeroed out).
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Link: https://lore.kernel.org/r/20210306161833.4552-2-jgross@suse.com
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_2l.c       |   15 +++++++++++++++
+ drivers/xen/events/events_base.c     |    1 +
+ drivers/xen/events/events_internal.h |    8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(v
+ 	return EVTCHN_2L_NR_CHANNELS;
+ }
+ 
++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
++{
++	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
++}
++
+ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+ {
+ 	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+@@ -354,9 +359,18 @@ static void evtchn_2l_resume(void)
+ 			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+ }
+ 
++static int evtchn_2l_percpu_deinit(unsigned int cpu)
++{
++	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
++			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
++
++	return 0;
++}
++
+ static const struct evtchn_ops evtchn_ops_2l = {
+ 	.max_channels      = evtchn_2l_max_channels,
+ 	.nr_channels       = evtchn_2l_max_channels,
++	.remove            = evtchn_2l_remove,
+ 	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
+ 	.clear_pending     = evtchn_2l_clear_pending,
+ 	.set_pending       = evtchn_2l_set_pending,
+ 	.is_pending        = evtchn_2l_is_pending,
+@@ -366,6 +380,7 @@ static const struct evtchn_op
+ 	.unmask            = evtchn_2l_unmask,
+ 	.handle_events     = evtchn_2l_handle_events,
+ 	.resume            = evtchn_2l_resume,
++	.percpu_deinit     = evtchn_2l_percpu_deinit,
+ };
+ 
+ void __init xen_evtchn_2l_init(void)
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -285,6 +285,7 @@ static int xen_irq_info_pirq_setup(unsig
+ static void xen_irq_info_cleanup(struct irq_info *info)
+ {
+ 	set_evtchn_to_irq(info->evtchn, -1);
++	xen_evtchn_port_remove(info->evtchn, info->cpu);
+ 	info->evtchn = 0;
+ }
+ 
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -67,6 +67,7 @@ struct evtchn_ops {
+ 	unsigned (*nr_channels)(void);
+ 
+ 	int (*setup)(struct irq_info *info);
++	void (*remove)(evtchn_port_t port, unsigned int cpu);
+ 	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+ 
+ 	void (*clear_pending)(unsigned port);
+@@ -109,6 +110,13 @@ static inline int xen_evtchn_port_setup(
+ 	return 0;
+ }
+ 
++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
++					  unsigned int cpu)
++{
++	if (evtchn_ops->remove)
++		evtchn_ops->remove(evtchn, cpu);
++}
++
+ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+ 					       unsigned cpu)
+ {
-- 
2.47.3
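---

The three xen/events fixes queued above share one idea: an event channel's mask is no longer a single bit but the OR of independent mask *reasons*, and the channel may only be unmasked once every reason has been dropped. The following is a minimal user-space C sketch of that pattern, purely illustrative with toy names and no locking (the kernel's do_mask()/do_unmask() additionally serialize both updates with info->lock and operate on the real evtchn mask bit):

/* Toy model of the mask-reason scheme above; not the kernel API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04

struct toy_event {
	uint8_t mask_reason;	/* why the channel is masked */
	bool masked;		/* stand-in for the hardware mask bit */
};

/* The first reason to arrive really masks the channel. */
static void do_mask(struct toy_event *ev, uint8_t reason)
{
	if (!ev->mask_reason)
		ev->masked = true;
	ev->mask_reason |= reason;
}

/* Only dropping the last reason unmasks it again. */
static void do_unmask(struct toy_event *ev, uint8_t reason)
{
	ev->mask_reason &= ~reason;
	if (!ev->mask_reason)
		ev->masked = false;
}

int main(void)
{
	struct toy_event ev = { 0, false };

	do_mask(&ev, EVT_MASK_REASON_EOI_PENDING);	/* ack, EOI pending */
	do_mask(&ev, EVT_MASK_REASON_TEMPORARY);	/* start cpu migration */
	do_unmask(&ev, EVT_MASK_REASON_TEMPORARY);	/* migration finished */

	/* Still masked: the EOI has not been delivered yet. */
	printf("masked=%d (expect 1)\n", ev.masked);

	do_unmask(&ev, EVT_MASK_REASON_EOI_PENDING);	/* late EOI fires */
	printf("masked=%d (expect 0)\n", ev.masked);

	return 0;
}

With this invariant, the temporary mask/unmask pair on the affinity-change path can no longer re-enable a channel whose EOI is still outstanding, which is the race the "don't unmask an event channel when an eoi is pending" patch closes.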