From: Greg Kroah-Hartman
Date: Mon, 15 Mar 2021 12:16:44 +0000 (+0100)
Subject: 4.9-stable patches
X-Git-Tag: v4.4.262~9
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=1a3e8435206e0d6065245298ba3c753ebff05486;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
      iio-imu-adis16400-fix-memory-leak.patch
      iio-imu-adis16400-release-allocated-memory-on-failure.patch
      xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
      xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
      xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
---

diff --git a/queue-4.9/iio-imu-adis16400-fix-memory-leak.patch b/queue-4.9/iio-imu-adis16400-fix-memory-leak.patch
new file mode 100644
index 00000000000..34174f3b14d
--- /dev/null
+++ b/queue-4.9/iio-imu-adis16400-fix-memory-leak.patch
@@ -0,0 +1,40 @@
+From foo@baz Mon Mar 15 01:10:25 PM CET 2021
+From: Krzysztof Kozlowski
+Date: Sat, 13 Mar 2021 18:29:50 +0100
+Subject: iio: imu: adis16400: fix memory leak
+To: stable@vger.kernel.org
+Cc: Navid Emamdoost , Alexandru Ardelean , Jonathan Cameron , Krzysztof Kozlowski
+Message-ID: <20210313172950.6224-2-krzysztof.kozlowski@canonical.com>
+
+From: Navid Emamdoost
+
+commit 9c0530e898f384c5d279bfcebd8bb17af1105873 upstream.
+
+In adis_update_scan_mode_burst, if adis->buffer allocation fails release
+the adis->xfer.
+
+Signed-off-by: Navid Emamdoost
+Reviewed-by: Alexandru Ardelean
+Signed-off-by: Jonathan Cameron
+[krzk: backport applied to adis16400_buffer.c instead of adis_buffer.c]
+Signed-off-by: Krzysztof Kozlowski
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/iio/imu/adis16400_buffer.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/imu/adis16400_buffer.c
++++ b/drivers/iio/imu/adis16400_buffer.c
+@@ -37,8 +37,11 @@ int adis16400_update_scan_mode(struct ii
+ 		return -ENOMEM;
+ 
+ 	adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+-	if (!adis->buffer)
++	if (!adis->buffer) {
++		kfree(adis->xfer);
++		adis->xfer = NULL;
+ 		return -ENOMEM;
++	}
+ 
+ 	tx = adis->buffer + burst_length;
+ 	tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
diff --git a/queue-4.9/iio-imu-adis16400-release-allocated-memory-on-failure.patch b/queue-4.9/iio-imu-adis16400-release-allocated-memory-on-failure.patch
new file mode 100644
index 00000000000..d81afe178a8
--- /dev/null
+++ b/queue-4.9/iio-imu-adis16400-release-allocated-memory-on-failure.patch
@@ -0,0 +1,41 @@
+From foo@baz Mon Mar 15 01:10:25 PM CET 2021
+From: Krzysztof Kozlowski
+Date: Sat, 13 Mar 2021 18:29:49 +0100
+Subject: iio: imu: adis16400: release allocated memory on failure
+To: stable@vger.kernel.org
+Cc: Navid Emamdoost , Alexandru Ardelean , Jonathan Cameron , Krzysztof Kozlowski
+Message-ID: <20210313172950.6224-1-krzysztof.kozlowski@canonical.com>
+
+From: Krzysztof Kozlowski
+
+From: Navid Emamdoost
+
+commit ab612b1daf415b62c58e130cb3d0f30b255a14d0 upstream.
+
+In adis_update_scan_mode, if allocation for adis->buffer fails,
+previously allocated adis->xfer needs to be released.
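+
+The shape of the fix, as a condensed sketch of the error path (the
+actual hunk is in the diff below; surrounding code is omitted):
+
+	adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+	if (!adis->buffer) {
+		kfree(adis->xfer);	/* free the earlier allocation */
+		adis->xfer = NULL;	/* don't leave a dangling pointer */
+		return -ENOMEM;
+	}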
+
+Signed-off-by: Navid Emamdoost
+Reviewed-by: Alexandru Ardelean
+Signed-off-by: Jonathan Cameron
+Signed-off-by: Krzysztof Kozlowski
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/iio/imu/adis_buffer.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev
+ 		return -ENOMEM;
+ 
+ 	adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+-	if (!adis->buffer)
++	if (!adis->buffer) {
++		kfree(adis->xfer);
++		adis->xfer = NULL;
+ 		return -ENOMEM;
++	}
+ 
+ 	rx = adis->buffer;
+ 	tx = rx + scan_count;
diff --git a/queue-4.9/series b/queue-4.9/series
index d2ee9b348e3..b290806f724 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -71,3 +71,8 @@ powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch
 binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch
 hwmon-lm90-fix-max6658-sporadic-wrong-temperature-reading.patch
 kvm-arm64-fix-exclusive-limit-for-ipa-size.patch
+iio-imu-adis16400-release-allocated-memory-on-failure.patch
+iio-imu-adis16400-fix-memory-leak.patch
+xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
+xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
+xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
diff --git a/queue-4.9/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch b/queue-4.9/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
new file mode 100644
index 00000000000..badca95eada
--- /dev/null
+++ b/queue-4.9/xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
@@ -0,0 +1,126 @@
+From foo@baz Mon Mar 15 01:16:05 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 10:06:31 +0100
+Subject: xen/events: avoid handling the same event on two cpus at the same time
+
+From: Juergen Gross
+
+commit b6622798bc50b625a1e62f82c7190df40c1f5b21 upstream.
+
+When changing the cpu affinity of an event it can happen today that
+(with some unlucky timing) the same event will be handled on the old
+and the new cpu at the same time.
+
+Avoid that by adding an "event active" flag to the per-event data and
+call the handler only if this flag isn't set.
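+
+In outline (a simplified sketch of the handler entry/exit pairing; the
+real changes to handle_irq_for_port() and the new event_handler_exit()
+helper are in the diff below):
+
+	/* entry: atomically claim the event, bail out if already claimed */
+	if (xchg_acquire(&info->is_active, 1))
+		return;
+
+	/* ... handle the event ... */
+
+	/* exit: drop the claim before clearing the pending bit */
+	smp_store_release(&info->is_active, 0);
+	clear_evtchn(info->evtchn);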
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Link: https://lore.kernel.org/r/20210306161833.4552-4-jgross@suse.com
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_base.c | 25 +++++++++++++++++--------
+ drivers/xen/events/events_internal.h | 1 +
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -702,6 +702,12 @@ static void xen_evtchn_close(unsigned in
+ 	BUG();
+ }
+ 
++static void event_handler_exit(struct irq_info *info)
++{
++	smp_store_release(&info->is_active, 0);
++	clear_evtchn(info->evtchn);
++}
++
+ static void pirq_query_unmask(int irq)
+ {
+ 	struct physdev_irq_status_query irq_status;
+@@ -732,13 +738,13 @@ static void eoi_pirq(struct irq_data *da
+ 	    likely(!irqd_irq_disabled(data))) {
+ 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 		irq_move_masked_irq(data);
+ 
+ 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+ 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+@@ -1574,6 +1580,8 @@ void handle_irq_for_port(evtchn_port_t p
+ 	}
+ 
+ 	info = info_for_irq(irq);
++	if (xchg_acquire(&info->is_active, 1))
++		return;
+ 
+ 	if (ctrl->defer_eoi) {
+ 		info->eoi_cpu = smp_processor_id();
+@@ -1750,13 +1758,13 @@ static void ack_dynirq(struct irq_data *
+ 	    likely(!irqd_irq_disabled(data))) {
+ 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 
+ 		irq_move_masked_irq(data);
+ 
+ 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ }
+ 
+ static void mask_ack_dynirq(struct irq_data *data)
+@@ -1772,7 +1780,7 @@ static void lateeoi_ack_dynirq(struct ir
+ 
+ 	if (VALID_EVTCHN(evtchn)) {
+ 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 	}
+ }
+ 
+@@ -1783,7 +1791,7 @@ static void lateeoi_mask_ack_dynirq(stru
+ 
+ 	if (VALID_EVTCHN(evtchn)) {
+ 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ 	}
+ }
+ 
+@@ -1892,10 +1900,11 @@ static void restore_cpu_ipis(unsigned in
+ /* Clear an irq's pending state, in preparation for polling on it */
+ void xen_clear_irq_pending(int irq)
+ {
+-	int evtchn = evtchn_from_irq(irq);
++	struct irq_info *info = info_for_irq(irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ }
+ EXPORT_SYMBOL(xen_clear_irq_pending);
+ void xen_set_irq_pending(int irq)
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -40,6 +40,7 @@ struct irq_info {
+ #define EVT_MASK_REASON_EXPLICIT	0x01
+ #define EVT_MASK_REASON_TEMPORARY	0x02
+ #define EVT_MASK_REASON_EOI_PENDING	0x04
++	u8 is_active;		/* Is event just being handled? */
+ 	unsigned irq;
+ 	unsigned int evtchn;	/* event channel */
+ 	unsigned short cpu;	/* cpu bound */
diff --git a/queue-4.9/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch b/queue-4.9/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
new file mode 100644
index 00000000000..32a7640eda5
--- /dev/null
+++ b/queue-4.9/xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
@@ -0,0 +1,368 @@
+From foo@baz Mon Mar 15 01:16:05 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 09:55:36 +0100
+Subject: xen/events: don't unmask an event channel when an eoi is pending
+
+From: Juergen Gross
+
+commit 25da4618af240fbec6112401498301a6f2bc9702 upstream.
+
+An event channel should be kept masked when an eoi is pending for it.
+When being migrated to another cpu it might be unmasked, though.
+
+In order to avoid this keep three different flags for each event channel
+to be able to distinguish "normal" masking/unmasking from eoi related
+masking/unmasking and temporary masking. The event channel should only
+be able to generate an interrupt if all flags are cleared.
+
+Cc: stable@vger.kernel.org
+Fixes: 54c9de89895e ("xen/events: add a new "late EOI" evtchn framework")
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Reviewed-by: Boris Ostrovsky
+Tested-by: Ross Lagerwall
+Link: https://lore.kernel.org/r/20210306161833.4552-3-jgross@suse.com
+
+[boris -- corrected Fixed tag format]
+
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_2l.c | 7 --
+ drivers/xen/events/events_base.c | 108 ++++++++++++++++++++++++++---------
+ drivers/xen/events/events_fifo.c | 7 --
+ drivers/xen/events/events_internal.h | 13 +---
+ 4 files changed, 87 insertions(+), 48 deletions(-)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -75,12 +75,6 @@ static bool evtchn_2l_is_pending(unsigne
+ 	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ }
+ 
+-static bool evtchn_2l_test_and_set_mask(unsigned port)
+-{
+-	struct shared_info *s = HYPERVISOR_shared_info;
+-	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+-}
+-
+ static void evtchn_2l_mask(unsigned port)
+ {
+ 	struct shared_info *s = HYPERVISOR_shared_info;
+@@ -374,7 +368,6 @@ static const struct evtchn_ops evtchn_op
+ 	.clear_pending = evtchn_2l_clear_pending,
+ 	.set_pending = evtchn_2l_set_pending,
+ 	.is_pending = evtchn_2l_is_pending,
+-	.test_and_set_mask = evtchn_2l_test_and_set_mask,
+ 	.mask = evtchn_2l_mask,
+ 	.unmask = evtchn_2l_unmask,
+ 	.handle_events = evtchn_2l_handle_events,
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -99,6 +99,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
+  *   evtchn_rwlock
+  *     IRQ-desc lock
+  *       percpu eoi_list_lock
++ *         irq_info->lock
+  */
+ 
+ static LIST_HEAD(xen_irq_list_head);
+@@ -220,6 +221,8 @@ static int xen_irq_info_common_setup(str
+ 	info->irq = irq;
+ 	info->evtchn = evtchn;
+ 	info->cpu = cpu;
++	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
++	spin_lock_init(&info->lock);
+ 
+ 	ret = set_evtchn_to_irq(evtchn, irq);
+ 	if (ret < 0)
+@@ -367,6 +370,34 @@ unsigned int cpu_from_evtchn(unsigned in
+ 	return ret;
+ }
+ 
++static void do_mask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	if (!info->mask_reason)
++		mask_evtchn(info->evtchn);
++
++	info->mask_reason |= reason;
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
++static void do_unmask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	info->mask_reason &= ~reason;
++
++	if (!info->mask_reason)
++		unmask_evtchn(info->evtchn);
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
+ #ifdef CONFIG_X86
+ static bool pirq_check_eoi_map(unsigned irq)
+ {
+@@ -502,7 +533,7 @@ static void xen_irq_lateeoi_locked(struc
+ 	}
+ 
+ 	info->eoi_time = 0;
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+ 
+ static void xen_irq_lateeoi_worker(struct work_struct *work)
+@@ -689,7 +720,8 @@ static void pirq_query_unmask(int irq)
+ 
+ static void eoi_pirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	int evtchn = info ? info->evtchn : 0;
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+@@ -698,14 +730,13 @@ static void eoi_pirq(struct irq_data *da
+ 
+ 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ 	    likely(!irqd_irq_disabled(data))) {
+-		int masked = test_and_set_mask(evtchn);
++		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 		clear_evtchn(evtchn);
+ 
+ 		irq_move_masked_irq(data);
+ 
+-		if (!masked)
+-			unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+ 		clear_evtchn(evtchn);
+ 
+@@ -758,7 +789,8 @@ static unsigned int __startup_pirq(unsig
+ 		goto err;
+ 
+ out:
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
++
+ 	eoi_pirq(irq_get_irq_data(irq));
+ 
+ 	return 0;
+@@ -785,7 +817,7 @@ static void shutdown_pirq(struct irq_dat
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	mask_evtchn(evtchn);
++	do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ 	xen_evtchn_close(evtchn);
+ 	xen_irq_info_cleanup(info);
+ }
+@@ -1648,8 +1680,8 @@ void rebind_evtchn_irq(int evtchn, int i
+ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ {
+ 	struct evtchn_bind_vcpu bind_vcpu;
+-	int evtchn = evtchn_from_irq(irq);
+-	int masked;
++	struct irq_info *info = info_for_irq(irq);
++	int evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return -1;
+@@ -1665,7 +1697,7 @@ static int rebind_irq_to_cpu(unsigned ir
+ 	 * Mask the event while changing the VCPU binding to prevent
+ 	 * it being delivered on an unexpected VCPU.
+ 	 */
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	/*
+ 	 * If this fails, it usually just indicates that we're dealing with a
+@@ -1675,8 +1707,7 @@ static int rebind_irq_to_cpu(unsigned ir
+ 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ 		bind_evtchn_to_cpu(evtchn, tcpu);
+ 
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 0;
+ }
+@@ -1691,37 +1722,39 @@ static int set_affinity_irq(struct irq_d
+ 
+ static void enable_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void disable_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		mask_evtchn(evtchn);
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void ack_dynirq(struct irq_data *data)
+ {
+-	int evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+ 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ 	    likely(!irqd_irq_disabled(data))) {
+-		int masked = test_and_set_mask(evtchn);
++		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 		clear_evtchn(evtchn);
+ 
+ 		irq_move_masked_irq(data);
+ 
+-		if (!masked)
+-			unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 	} else
+ 		clear_evtchn(evtchn);
+ }
+@@ -1732,18 +1765,39 @@ static void mask_ack_dynirq(struct irq_d
+ 	ack_dynirq(data);
+ }
+ 
++static void lateeoi_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++		clear_evtchn(evtchn);
++	}
++}
++
++static void lateeoi_mask_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
++		clear_evtchn(evtchn);
++	}
++}
++
+ static int retrigger_dynirq(struct irq_data *data)
+ {
+-	unsigned int evtchn = evtchn_from_irq(data->irq);
+-	int masked;
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return 0;
+ 
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 	set_evtchn(evtchn);
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 1;
+ }
+@@ -1950,8 +2004,8 @@ static struct irq_chip xen_lateeoi_chip
+ 	.irq_mask = disable_dynirq,
+ 	.irq_unmask = enable_dynirq,
+ 
+-	.irq_ack = mask_ack_dynirq,
+-	.irq_mask_ack = mask_ack_dynirq,
++	.irq_ack = lateeoi_ack_dynirq,
++	.irq_mask_ack = lateeoi_mask_ack_dynirq,
+ 
+ 	.irq_set_affinity = set_affinity_irq,
+ 	.irq_retrigger = retrigger_dynirq,
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsig
+ 	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
+ }
+ 
+-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+-{
+-	event_word_t *word = event_word_from_port(port);
+-	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+-}
+-
+ static void evtchn_fifo_mask(unsigned port)
+ {
+ 	event_word_t *word = event_word_from_port(port);
+@@ -421,7 +415,6 @@ static const struct evtchn_ops evtchn_op
+ 	.clear_pending = evtchn_fifo_clear_pending,
+ 	.set_pending = evtchn_fifo_set_pending,
+ 	.is_pending = evtchn_fifo_is_pending,
+-	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ 	.mask = evtchn_fifo_mask,
+ 	.unmask = evtchn_fifo_unmask,
+ 	.handle_events = evtchn_fifo_handle_events,
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -35,13 +35,18 @@ struct irq_info {
+ 	struct list_head eoi_list;
+ 	short refcnt;
+ 	short spurious_cnt;
+-	enum xen_irq_type type; /* type */
++	short type;		/* type */
++	u8 mask_reason;		/* Why is event channel masked */
++#define EVT_MASK_REASON_EXPLICIT	0x01
++#define EVT_MASK_REASON_TEMPORARY	0x02
++#define EVT_MASK_REASON_EOI_PENDING	0x04
+ 	unsigned irq;
+ 	unsigned int evtchn;	/* event channel */
+ 	unsigned short cpu;	/* cpu bound */
+ 	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
+ 	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
+ 	u64 eoi_time;		/* Time in jiffies when to EOI. */
++	spinlock_t lock;
+ 
+ 	union {
+ 		unsigned short virq;
+@@ -73,7 +78,6 @@ struct evtchn_ops {
+ 	void (*clear_pending)(unsigned port);
+ 	void (*set_pending)(unsigned port);
+ 	bool (*is_pending)(unsigned port);
+-	bool (*test_and_set_mask)(unsigned port);
+ 	void (*mask)(unsigned port);
+ 	void (*unmask)(unsigned port);
+ 
+@@ -138,11 +142,6 @@ static inline bool test_evtchn(unsigned
+ 	return evtchn_ops->is_pending(port);
+ }
+ 
+-static inline bool test_and_set_mask(unsigned port)
+-{
+-	return evtchn_ops->test_and_set_mask(port);
+-}
+-
+ static inline void mask_evtchn(unsigned port)
+ {
+ 	return evtchn_ops->mask(port);
diff --git a/queue-4.9/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch b/queue-4.9/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
new file mode 100644
index 00000000000..8d0814fc133
--- /dev/null
+++ b/queue-4.9/xen-events-reset-affinity-of-2-level-event-when-tearing-it-down.patch
@@ -0,0 +1,108 @@
+From foo@baz Mon Mar 15 01:16:05 PM CET 2021
+From: Juergen Gross
+Date: Mon, 15 Mar 2021 09:54:02 +0100
+Subject: xen/events: reset affinity of 2-level event when tearing it down
+
+From: Juergen Gross
+
+commit 9e77d96b8e2724ed00380189f7b0ded61113b39f upstream.
+
+When creating a new event channel with 2-level events the affinity
+needs to be reset initially in order to avoid using an old affinity
+from earlier usage of the event channel port. So when tearing an event
+channel down reset all affinity bits.
+
+The same applies to the affinity when onlining a vcpu: all old
+affinity settings for this vcpu must be reset. As percpu events get
+initialized before the percpu event channel hook is called,
+resetting of the affinities happens after offlining a vcpu (this is
+working, as initial percpu memory is zeroed out).
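+
+For 2-level events this amounts to clearing the event's bit in the
+per-cpu mask (a simplified sketch; the full hunks are below):
+
+	/* tearing down one event: drop its bit for the cpu it was bound to */
+	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+
+	/* offlining a vcpu: wipe all affinity bits of that cpu */
+	memset(per_cpu(cpu_evtchn_mask, cpu), 0,
+	       sizeof(xen_ulong_t) * EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);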
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall
+Signed-off-by: Juergen Gross
+Reviewed-by: Julien Grall
+Link: https://lore.kernel.org/r/20210306161833.4552-2-jgross@suse.com
+Signed-off-by: Boris Ostrovsky
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/events/events_2l.c | 15 +++++++++++++++
+ drivers/xen/events/events_base.c | 1 +
+ drivers/xen/events/events_internal.h | 8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -46,6 +46,11 @@ static unsigned evtchn_2l_max_channels(v
+ 	return EVTCHN_2L_NR_CHANNELS;
+ }
+ 
++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
++{
++	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
++}
++
+ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+ {
+ 	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+@@ -353,9 +358,18 @@ static void evtchn_2l_resume(void)
+ 				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+ }
+ 
++static int evtchn_2l_percpu_deinit(unsigned int cpu)
++{
++	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
++			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
++
++	return 0;
++}
++
+ static const struct evtchn_ops evtchn_ops_2l = {
+ 	.max_channels = evtchn_2l_max_channels,
+ 	.nr_channels = evtchn_2l_max_channels,
++	.remove = evtchn_2l_remove,
+ 	.bind_to_cpu = evtchn_2l_bind_to_cpu,
+ 	.clear_pending = evtchn_2l_clear_pending,
+ 	.set_pending = evtchn_2l_set_pending,
+@@ -365,6 +379,7 @@ static const struct evtchn_op
+ 	.unmask = evtchn_2l_unmask,
+ 	.handle_events = evtchn_2l_handle_events,
+ 	.resume = evtchn_2l_resume,
++	.percpu_deinit = evtchn_2l_percpu_deinit,
+ };
+ 
+ void __init xen_evtchn_2l_init(void)
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -286,6 +286,7 @@ static int xen_irq_info_pirq_setup(unsig
+ static void xen_irq_info_cleanup(struct irq_info *info)
+ {
+ 	set_evtchn_to_irq(info->evtchn, -1);
++	xen_evtchn_port_remove(info->evtchn, info->cpu);
+ 	info->evtchn = 0;
+ }
+ 
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -67,6 +67,7 @@ struct evtchn_ops {
+ 	unsigned (*nr_channels)(void);
+ 
+ 	int (*setup)(struct irq_info *info);
++	void (*remove)(evtchn_port_t port, unsigned int cpu);
+ 	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+ 
+ 	void (*clear_pending)(unsigned port);
+@@ -109,6 +110,13 @@ static inline int xen_evtchn_port_setup(
+ 	return 0;
+ }
+ 
++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
++					  unsigned int cpu)
++{
++	if (evtchn_ops->remove)
++		evtchn_ops->remove(evtchn, cpu);
++}
++
+ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+ 					       unsigned cpu)
+ {