From: Greg Kroah-Hartman
Date: Sat, 30 Mar 2024 08:59:19 +0000 (+0100)
Subject: 5.15-stable patches
X-Git-Tag: v6.7.12~130
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a0c20d6037b1d2abf1fd321fadeb53eecabcd8f2;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	selftests-mptcp-diag-return-ksft_fail-not-test_cnt.patch
	vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch
	vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch
	vfio-pci-create-persistent-intx-handler.patch
	vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
	vfio-pci-lock-external-intx-masking-ops.patch
	vfio-platform-create-persistent-irq-handlers.patch
---

diff --git a/queue-5.15/selftests-mptcp-diag-return-ksft_fail-not-test_cnt.patch b/queue-5.15/selftests-mptcp-diag-return-ksft_fail-not-test_cnt.patch
new file mode 100644
index 00000000000..4726cc67efd
--- /dev/null
+++ b/queue-5.15/selftests-mptcp-diag-return-ksft_fail-not-test_cnt.patch
@@ -0,0 +1,52 @@
+From 45bcc0346561daa3f59e19a753cc7f3e08e8dff1 Mon Sep 17 00:00:00 2001
+From: Geliang Tang
+Date: Fri, 1 Mar 2024 18:11:22 +0100
+Subject: selftests: mptcp: diag: return KSFT_FAIL not test_cnt
+
+From: Geliang Tang
+
+commit 45bcc0346561daa3f59e19a753cc7f3e08e8dff1 upstream.
+
+The test counter 'test_cnt' should not be returned in diag.sh: what if
+only the 4th test fails? The script would 'exit 4', which is 'exit
+${KSFT_SKIP}', marking the whole test as skipped instead of failed!
+
+So we should do ret=${KSFT_FAIL} instead.
+
+Fixes: df62f2ec3df6 ("selftests/mptcp: add diag interface tests")
+Cc: stable@vger.kernel.org
+Fixes: 42fb6cddec3b ("selftests: mptcp: more stable diag tests")
+Signed-off-by: Geliang Tang
+Reviewed-by: Matthieu Baerts (NGI0)
+Signed-off-by: Matthieu Baerts (NGI0)
+Signed-off-by: David S. Miller
+Signed-off-by: Matthieu Baerts (NGI0)
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/testing/selftests/net/mptcp/diag.sh | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -53,7 +53,7 @@ __chk_nr()
+ 	printf "%-50s" "$msg"
+ 	if [ $nr != $expected ]; then
+ 		echo "[ fail ] expected $expected found $nr"
+-		ret=$test_cnt
++		ret=${KSFT_FAIL}
+ 	else
+ 		echo "[ ok ]"
+ 	fi
+@@ -88,10 +88,10 @@ wait_msk_nr()
+ 	printf "%-50s" "$msg"
+ 	if [ $i -ge $timeout ]; then
+ 		echo "[ fail ] timeout while expecting $expected max $max last $nr"
+-		ret=$test_cnt
++		ret=${KSFT_FAIL}
+ 	elif [ $nr != $expected ]; then
+ 		echo "[ fail ] expected $expected found $nr"
+-		ret=$test_cnt
++		ret=${KSFT_FAIL}
+ 	else
+ 		echo "[ ok ]"
+ 	fi
diff --git a/queue-5.15/series b/queue-5.15/series
index 34e7eda705f..b7cdf1ccd3c 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -557,3 +557,10 @@ nfsd-fix-a-regression-in-nfsd_setattr.patch
 perf-core-fix-reentry-problem-in-perf_output_read_group.patch
 efivarfs-request-at-most-512-bytes-for-variable-names.patch
 powerpc-xor_vmx-add-mhard-float-to-cflags.patch
+selftests-mptcp-diag-return-ksft_fail-not-test_cnt.patch
+vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
+vfio-pci-lock-external-intx-masking-ops.patch
+vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch
+vfio-pci-create-persistent-intx-handler.patch
+vfio-platform-create-persistent-irq-handlers.patch
+vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch
diff --git a/queue-5.15/vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch b/queue-5.15/vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch
new file mode 100644
index 00000000000..6f91ab93016
--- /dev/null
+++ b/queue-5.15/vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch
@@ -0,0 +1,56 @@
+From stable+bounces-33778-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:33 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:42 -0600
+Subject: vfio/fsl-mc: Block calling interrupt handler without trigger
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Diana Craciun , Kevin Tian
+Message-ID: <20240329225944.3294388-7-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit 7447d911af699a15f8d050dfcb7c680a86f87012 ]
+
+The eventfd_ctx trigger pointer of the vfio_fsl_mc_irq object is
+initially NULL and may become NULL if the user sets the trigger
+eventfd to -1. The interrupt handler itself is guaranteed that
+trigger is always valid between request_irq() and free_irq(), but
+the loopback testing mechanisms to invoke the handler function
+need to test the trigger. The triggering and setting ioctl paths
+both make use of igate and are therefore mutually exclusive.
+
+The vfio-fsl-mc driver does not make use of irqfds, nor does it
+support any sort of masking operations, therefore unlike vfio-pci
+and vfio-platform, the flow can remain essentially unchanged.
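As an aside, the guarded-signal pattern this backport applies can be sketched in a few lines. This is illustrative only; the demo_* names are hypothetical stand-ins for the driver's vfio_fsl_mc_irq object and the igate mutex:

#include <linux/eventfd.h>
#include <linux/mutex.h>

struct demo_irq {
	struct eventfd_ctx *trigger;	/* NULL until userspace supplies an fd */
};

static DEFINE_MUTEX(demo_igate);	/* serializes set-trigger and loopback paths */

/* ioctl loopback path: signal the eventfd, never call the hard-IRQ handler */
static void demo_loopback_trigger(struct demo_irq *irq)
{
	mutex_lock(&demo_igate);
	if (irq->trigger)		/* trigger may have been set to -1 (NULL) */
		eventfd_signal(irq->trigger, 1);
	mutex_unlock(&demo_igate);
}

Because both the set-trigger path and the loopback path hold the same mutex, the NULL check above cannot race a concurrent SET_IRQS deconfigure.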
+
+Cc: Diana Craciun
+Cc:
+Fixes: cc0ee20bd969 ("vfio/fsl-mc: trigger an interrupt via eventfd")
+Reviewed-by: Kevin Tian
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-8-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+@@ -142,13 +142,14 @@ static int vfio_fsl_mc_set_irq_trigger(s
+ 	irq = &vdev->mc_irqs[index];
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_fsl_mc_irq_handler(hwirq, irq);
++		if (irq->trigger)
++			eventfd_signal(irq->trigger, 1);
+ 
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		u8 trigger = *(u8 *)data;
+ 
+-		if (trigger)
+-			vfio_fsl_mc_irq_handler(hwirq, irq);
++		if (trigger && irq->trigger)
++			eventfd_signal(irq->trigger, 1);
+ 	}
+ 
+ 	return 0;
diff --git a/queue-5.15/vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch b/queue-5.15/vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch
new file mode 100644
index 00000000000..255a61e8714
--- /dev/null
+++ b/queue-5.15/vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch
@@ -0,0 +1,92 @@
+From stable+bounces-33777-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:32 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:39 -0600
+Subject: vfio: Introduce interface to flush virqfd inject workqueue
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian , Reinette Chatre
+Message-ID: <20240329225944.3294388-4-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit b620ecbd17a03cacd06f014a5d3f3a11285ce053 ]
+
+In order to synchronize changes that can affect the thread callback,
+introduce an interface to force a flush of the inject workqueue. The
+irqfd pointer is only valid under spinlock, but the workqueue cannot
+be flushed under spinlock. Therefore the flush work for the irqfd is
+queued under spinlock. The vfio_irqfd_cleanup_wq workqueue is re-used
+for queuing this work such that flushing the workqueue is also ordered
+relative to shutdown.
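The queue-under-lock, flush-outside-lock shape generalizes beyond vfio; a minimal sketch follows (demo_* names are hypothetical, mirroring the virqfd fields in the diff below):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_virqfd {
	struct work_struct inject;		/* runs the real thread callback */
	struct work_struct flush_inject;	/* proxy work: flushes inject */
};

static DEFINE_SPINLOCK(demo_lock);		/* protects the virqfd pointer */
static struct workqueue_struct *demo_wq;	/* shared with shutdown work */

static void demo_flush_inject(struct work_struct *work)
{
	struct demo_virqfd *v = container_of(work, struct demo_virqfd,
					     flush_inject);

	flush_work(&v->inject);	/* may sleep: legal here, illegal under demo_lock */
}

static void demo_flush_thread(struct demo_virqfd **pv)
{
	unsigned long flags;

	/* the pointer is only stable under the lock, so queue the proxy there */
	spin_lock_irqsave(&demo_lock, flags);
	if (*pv)
		queue_work(demo_wq, &(*pv)->flush_inject);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* wait unlocked; reusing demo_wq also orders against queued shutdowns */
	flush_workqueue(demo_wq);
}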
+
+Reviewed-by: Kevin Tian
+Reviewed-by: Reinette Chatre
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-4-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/virqfd.c | 21 +++++++++++++++++++++
+ include/linux/vfio.h | 2 ++
+ 2 files changed, 23 insertions(+)
+
+--- a/drivers/vfio/virqfd.c
++++ b/drivers/vfio/virqfd.c
+@@ -104,6 +104,13 @@ static void virqfd_inject(struct work_st
+ 		virqfd->thread(virqfd->opaque, virqfd->data);
+ }
+ 
++static void virqfd_flush_inject(struct work_struct *work)
++{
++	struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
++
++	flush_work(&virqfd->inject);
++}
++
+ int vfio_virqfd_enable(void *opaque,
+ 		       int (*handler)(void *, void *),
+ 		       void (*thread)(void *, void *),
+@@ -127,6 +134,7 @@ int vfio_virqfd_enable(void *opaque,
+ 
+ 	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ 	INIT_WORK(&virqfd->inject, virqfd_inject);
++	INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
+ 
+ 	irqfd = fdget(fd);
+ 	if (!irqfd.file) {
+@@ -217,6 +225,19 @@ void vfio_virqfd_disable(struct virqfd *
+ }
+ EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+ 
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&virqfd_lock, flags);
++	if (*pvirqfd && (*pvirqfd)->thread)
++		queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
++	spin_unlock_irqrestore(&virqfd_lock, flags);
++
++	flush_workqueue(vfio_irqfd_cleanup_wq);
++}
++EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
++
+ module_init(vfio_virqfd_init);
+ module_exit(vfio_virqfd_exit);
+ 
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -243,6 +243,7 @@ struct virqfd {
+ 	wait_queue_entry_t	wait;
+ 	poll_table		pt;
+ 	struct work_struct	shutdown;
++	struct work_struct	flush_inject;
+ 	struct virqfd		**pvirqfd;
+ };
+ 
+@@ -251,5 +252,6 @@ extern int vfio_virqfd_enable(void *opaq
+ 			      void (*thread)(void *, void *),
+ 			      void *data, struct virqfd **pvirqfd, int fd);
+ extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
+ 
+ #endif /* VFIO_H */
diff --git a/queue-5.15/vfio-pci-create-persistent-intx-handler.patch b/queue-5.15/vfio-pci-create-persistent-intx-handler.patch
new file mode 100644
index 00000000000..d030f802c82
--- /dev/null
+++ b/queue-5.15/vfio-pci-create-persistent-intx-handler.patch
@@ -0,0 +1,257 @@
+From stable+bounces-33779-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:35 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:40 -0600
+Subject: vfio/pci: Create persistent INTx handler
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Reinette Chatre , Kevin Tian
+Message-ID: <20240329225944.3294388-5-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit 18c198c96a815c962adc2b9b77909eec0be7df4d ]
+
+A vulnerability exists where the eventfd for INTx signaling can be
+deconfigured, which unregisters the IRQ handler but still allows
+eventfds to be signaled with a NULL context through the SET_IRQS ioctl
+or through unmask irqfd if the device interrupt is pending.
+
+Ideally this could be solved with some additional locking; the igate
+mutex serializes the ioctl and config space accesses, and the interrupt
+handler is unregistered relative to the trigger, but the irqfd path
+runs asynchronous to those. The igate mutex cannot be acquired from the
+atomic context of the eventfd wake function. Disabling the irqfd
+relative to the eventfd registration is potentially incompatible with
+existing userspace.
+
+As a result, the solution implemented here moves configuration of the
+INTx interrupt handler to track the lifetime of the INTx context object
+and irq_type configuration, rather than registration of a particular
+trigger eventfd. Synchronization is added between the ioctl path and
+eventfd_signal() wrapper such that the eventfd trigger can be
+dynamically updated relative to in-flight interrupts or irqfd callbacks.
+
+Cc:
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reported-by: Reinette Chatre
+Reviewed-by: Kevin Tian
+Reviewed-by: Reinette Chatre
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-5-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 149 ++++++++++++++++++++------------------
+ 1 file changed, 82 insertions(+), 67 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -29,8 +29,13 @@ static void vfio_send_intx_eventfd(void
+ {
+ 	struct vfio_pci_core_device *vdev = opaque;
+ 
+-	if (likely(is_intx(vdev) && !vdev->virq_disabled))
+-		eventfd_signal(vdev->ctx[0].trigger, 1);
++	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
++		struct eventfd_ctx *trigger;
++
++		trigger = READ_ONCE(vdev->ctx[0].trigger);
++		if (likely(trigger))
++			eventfd_signal(trigger, 1);
++	}
+ }
+ 
+ static void __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+@@ -157,98 +162,104 @@ static irqreturn_t vfio_intx_handler(int
+ 	return ret;
+ }
+ 
+-static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
++static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
++			    struct eventfd_ctx *trigger)
+ {
++	struct pci_dev *pdev = vdev->pdev;
++	unsigned long irqflags;
++	char *name;
++	int ret;
++
+ 	if (!is_irq_none(vdev))
+ 		return -EINVAL;
+ 
+-	if (!vdev->pdev->irq)
++	if (!pdev->irq)
+ 		return -ENODEV;
+ 
++	name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", pci_name(pdev));
++	if (!name)
++		return -ENOMEM;
++
+ 	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ 	if (!vdev->ctx)
+ 		return -ENOMEM;
+ 
+ 	vdev->num_ctx = 1;
+ 
++	vdev->ctx[0].name = name;
++	vdev->ctx[0].trigger = trigger;
++
+ 	/*
+-	 * If the virtual interrupt is masked, restore it. Devices
+-	 * supporting DisINTx can be masked at the hardware level
+-	 * here, non-PCI-2.3 devices will have to wait until the
+-	 * interrupt is enabled.
++	 * Fill the initial masked state based on virq_disabled. After
++	 * enable, changing the DisINTx bit in vconfig directly changes INTx
++	 * masking. igate prevents races during setup, once running masked
++	 * is protected via irqlock.
++	 *
++	 * Devices supporting DisINTx also reflect the current mask state in
++	 * the physical DisINTx bit, which is not affected during IRQ setup.
++	 *
++	 * Devices without DisINTx support require an exclusive interrupt.
++	 * IRQ masking is performed at the IRQ chip. Again, igate protects
++	 * against races during setup and IRQ handlers and irqfds are not
++	 * yet active, therefore masked is stable and can be used to
++	 * conditionally auto-enable the IRQ.
++	 *
++	 * irq_type must be stable while the IRQ handler is registered,
++	 * therefore it must be set before request_irq().
+ 	 */
+ 	vdev->ctx[0].masked = vdev->virq_disabled;
+-	if (vdev->pci_2_3)
+-		pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++	if (vdev->pci_2_3) {
++		pci_intx(pdev, !vdev->ctx[0].masked);
++		irqflags = IRQF_SHARED;
++	} else {
++		irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0;
++	}
+ 
+ 	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ 
++	ret = request_irq(pdev->irq, vfio_intx_handler,
++			  irqflags, vdev->ctx[0].name, vdev);
++	if (ret) {
++		vdev->irq_type = VFIO_PCI_NUM_IRQS;
++		kfree(name);
++		vdev->num_ctx = 0;
++		kfree(vdev->ctx);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
++static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
++				struct eventfd_ctx *trigger)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+-	unsigned long irqflags = IRQF_SHARED;
+-	struct eventfd_ctx *trigger;
+-	unsigned long flags;
+-	int ret;
+-
+-	if (vdev->ctx[0].trigger) {
+-		free_irq(pdev->irq, vdev);
+-		kfree(vdev->ctx[0].name);
+-		eventfd_ctx_put(vdev->ctx[0].trigger);
+-		vdev->ctx[0].trigger = NULL;
+-	}
+-
+-	if (fd < 0) /* Disable only */
+-		return 0;
+-
+-	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
+-				      pci_name(pdev));
+-	if (!vdev->ctx[0].name)
+-		return -ENOMEM;
+-
+-	trigger = eventfd_ctx_fdget(fd);
+-	if (IS_ERR(trigger)) {
+-		kfree(vdev->ctx[0].name);
+-		return PTR_ERR(trigger);
+-	}
++	struct eventfd_ctx *old;
+ 
+-	vdev->ctx[0].trigger = trigger;
++	old = vdev->ctx[0].trigger;
+ 
+-	/*
+-	 * Devices without DisINTx support require an exclusive interrupt,
+-	 * IRQ masking is performed at the IRQ chip. The masked status is
+-	 * protected by vdev->irqlock. Setup the IRQ without auto-enable and
+-	 * unmask as necessary below under lock. DisINTx is unmodified by
+-	 * the IRQ configuration and may therefore use auto-enable.
+-	 */
+-	if (!vdev->pci_2_3)
+-		irqflags = IRQF_NO_AUTOEN;
++	WRITE_ONCE(vdev->ctx[0].trigger, trigger);
+ 
+-	ret = request_irq(pdev->irq, vfio_intx_handler,
+-			  irqflags, vdev->ctx[0].name, vdev);
+-	if (ret) {
+-		vdev->ctx[0].trigger = NULL;
+-		kfree(vdev->ctx[0].name);
+-		eventfd_ctx_put(trigger);
+-		return ret;
++	/* Releasing an old ctx requires synchronizing in-flight users */
++	if (old) {
++		synchronize_irq(pdev->irq);
++		vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
++		eventfd_ctx_put(old);
+ 	}
+ 
+-	spin_lock_irqsave(&vdev->irqlock, flags);
+-	if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
+-		enable_irq(pdev->irq);
+-	spin_unlock_irqrestore(&vdev->irqlock, flags);
+-
+ 	return 0;
+ }
+ 
+ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+ {
++	struct pci_dev *pdev = vdev->pdev;
++
+ 	vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ 	vfio_virqfd_disable(&vdev->ctx[0].mask);
+-	vfio_intx_set_signal(vdev, -1);
++	free_irq(pdev->irq, vdev);
++	if (vdev->ctx[0].trigger)
++		eventfd_ctx_put(vdev->ctx[0].trigger);
++	kfree(vdev->ctx[0].name);
+ 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ 	vdev->num_ctx = 0;
+ 	kfree(vdev->ctx);
+@@ -498,19 +509,23 @@ static int vfio_pci_set_intx_trigger(str
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++		struct eventfd_ctx *trigger = NULL;
+ 		int32_t fd = *(int32_t *)data;
+ 		int ret;
+ 
++		if (fd >= 0) {
++			trigger = eventfd_ctx_fdget(fd);
++			if (IS_ERR(trigger))
++				return PTR_ERR(trigger);
++		}
++
+ 		if (is_intx(vdev))
+-			return vfio_intx_set_signal(vdev, fd);
++			ret = vfio_intx_set_signal(vdev, trigger);
++		else
++			ret = vfio_intx_enable(vdev, trigger);
+ 
+-		ret = vfio_intx_enable(vdev);
+-		if (ret)
+-			return ret;
+-
+-		ret = vfio_intx_set_signal(vdev, fd);
+-		if (ret)
+-			vfio_intx_disable(vdev);
++		if (ret && trigger)
++			eventfd_ctx_put(trigger);
+ 
+ 		return ret;
+ 	}
diff --git a/queue-5.15/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch b/queue-5.15/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
new file mode 100644
index 00000000000..619ddf09d22
--- /dev/null
+++ b/queue-5.15/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
@@ -0,0 +1,69 @@
+From stable+bounces-33776-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:32 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:37 -0600
+Subject: vfio/pci: Disable auto-enable of exclusive INTx IRQ
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian
+Message-ID: <20240329225944.3294388-2-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit fe9a7082684eb059b925c535682e68c34d487d43 ]
+
+Currently for devices requiring masking at the irqchip for INTx, ie.
+devices without DisINTx support, the IRQ is enabled in request_irq()
+and subsequently disabled as necessary to align with the masked status
+flag. This presents a window where the interrupt could fire between
+these events, resulting in the IRQ incrementing the disable depth twice.
+This would be unrecoverable for a user since the masked flag prevents
+nested enables through vfio.
+
+Instead, invert the logic using IRQF_NO_AUTOEN such that exclusive INTx
+is never auto-enabled, then unmask as required.
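Condensed from the diff that follows, the inverted flow looks like this (simplified; error handling and the shared-IRQ case omitted):

	/* exclusive INTx: register the handler without enabling the line */
	ret = request_irq(pdev->irq, vfio_intx_handler, IRQF_NO_AUTOEN,
			  vdev->ctx[0].name, vdev);
	if (ret)
		return ret;

	/* single enable point, under the lock that guards the masked state */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
		enable_irq(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

With IRQF_NO_AUTOEN the line is never live before the masked state has been applied, so a stray interrupt can no longer double-increment the disable depth.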
+
+Cc:
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reviewed-by: Kevin Tian
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-2-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -199,8 +199,15 @@ static int vfio_intx_set_signal(struct v
+ 
+ 	vdev->ctx[0].trigger = trigger;
+ 
++	/*
++	 * Devices without DisINTx support require an exclusive interrupt,
++	 * IRQ masking is performed at the IRQ chip. The masked status is
++	 * protected by vdev->irqlock. Setup the IRQ without auto-enable and
++	 * unmask as necessary below under lock. DisINTx is unmodified by
++	 * the IRQ configuration and may therefore use auto-enable.
++	 */
+ 	if (!vdev->pci_2_3)
+-		irqflags = 0;
++		irqflags = IRQF_NO_AUTOEN;
+ 
+ 	ret = request_irq(pdev->irq, vfio_intx_handler,
+ 			  irqflags, vdev->ctx[0].name, vdev);
+@@ -211,13 +218,9 @@ static int vfio_intx_set_signal(struct v
+ 		return ret;
+ 	}
+ 
+-	/*
+-	 * INTx disable will stick across the new irq setup,
+-	 * disable_irq won't.
+-	 */
+ 	spin_lock_irqsave(&vdev->irqlock, flags);
+-	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+-		disable_irq_nosync(pdev->irq);
++	if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
++		enable_irq(pdev->irq);
+ 	spin_unlock_irqrestore(&vdev->irqlock, flags);
+ 
+ 	return 0;
diff --git a/queue-5.15/vfio-pci-lock-external-intx-masking-ops.patch b/queue-5.15/vfio-pci-lock-external-intx-masking-ops.patch
new file mode 100644
index 00000000000..8d931478196
--- /dev/null
+++ b/queue-5.15/vfio-pci-lock-external-intx-masking-ops.patch
@@ -0,0 +1,120 @@
+From stable+bounces-33775-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:31 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:38 -0600
+Subject: vfio/pci: Lock external INTx masking ops
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Reinette Chatre , Kevin Tian
+Message-ID: <20240329225944.3294388-3-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit 810cd4bb53456d0503cc4e7934e063835152c1b7 ]
+
+Mask operations through config space changes to DisINTx may race INTx
+configuration changes via ioctl. Create wrappers that add locking for
+paths outside of the core interrupt code.
+
+In particular, irq_type is updated holding igate, therefore testing
+is_intx() requires holding igate. For example clearing DisINTx from
+config space can otherwise race changes of the interrupt configuration.
+
+This aligns interfaces which may trigger the INTx eventfd into two
+camps, one side serialized by igate and the other only enabled while
+INTx is configured. A subsequent patch introduces synchronization for
+the latter flows.
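The locking split is easiest to see side by side. Condensed from the diff that follows: callers already holding igate use the __ variant, while external entry points take the mutex themselves:

static void __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	lockdep_assert_held(&vdev->igate);	/* catch unserialized callers */
	/* ... existing masking logic, masked state under vdev->irqlock ... */
}

void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);
}

The SET_IRQS ioctl handlers, which already run with igate held, switch to the __ variants; config-space emulation and other external paths keep the locked wrappers, so is_intx() tests are made with irq_type stable.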
+
+Cc:
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reported-by: Reinette Chatre
+Reviewed-by: Kevin Tian
+Reviewed-by: Reinette Chatre
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-3-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -33,11 +33,13 @@ static void vfio_send_intx_eventfd(void
+ 		eventfd_signal(vdev->ctx[0].trigger, 1);
+ }
+ 
+-void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++static void __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+ 	unsigned long flags;
+ 
++	lockdep_assert_held(&vdev->igate);
++
+ 	spin_lock_irqsave(&vdev->irqlock, flags);
+ 
+ 	/*
+@@ -65,6 +67,13 @@ void vfio_pci_intx_mask(struct vfio_pci_
+ 	spin_unlock_irqrestore(&vdev->irqlock, flags);
+ }
+ 
++void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++{
++	mutex_lock(&vdev->igate);
++	__vfio_pci_intx_mask(vdev);
++	mutex_unlock(&vdev->igate);
++}
++
+ /*
+  * If this is triggered by an eventfd, we can't call eventfd_signal
+  * or else we'll deadlock on the eventfd wait queue. Return >0 when
+@@ -107,12 +116,21 @@ static int vfio_pci_intx_unmask_handler(
+ 	return ret;
+ }
+ 
+-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+ {
++	lockdep_assert_held(&vdev->igate);
++
+ 	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ 		vfio_send_intx_eventfd(vdev, NULL);
+ }
+ 
++void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++{
++	mutex_lock(&vdev->igate);
++	__vfio_pci_intx_unmask(vdev);
++	mutex_unlock(&vdev->igate);
++}
++
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ 	struct vfio_pci_core_device *vdev = dev_id;
+@@ -428,11 +446,11 @@ static int vfio_pci_set_intx_unmask(stru
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_pci_intx_unmask(vdev);
++		__vfio_pci_intx_unmask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		uint8_t unmask = *(uint8_t *)data;
+ 		if (unmask)
+-			vfio_pci_intx_unmask(vdev);
++			__vfio_pci_intx_unmask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		int32_t fd = *(int32_t *)data;
+ 		if (fd >= 0)
+@@ -455,11 +473,11 @@ static int vfio_pci_set_intx_mask(struct
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_pci_intx_mask(vdev);
++		__vfio_pci_intx_mask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		uint8_t mask = *(uint8_t *)data;
+ 		if (mask)
+-			vfio_pci_intx_mask(vdev);
++			__vfio_pci_intx_mask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		return -ENOTTY; /* XXX implement me */
+ 	}
diff --git a/queue-5.15/vfio-platform-create-persistent-irq-handlers.patch b/queue-5.15/vfio-platform-create-persistent-irq-handlers.patch
new file mode 100644
index 00000000000..6e714a8d764
--- /dev/null
+++ b/queue-5.15/vfio-platform-create-persistent-irq-handlers.patch
@@ -0,0 +1,253 @@
+From stable+bounces-33780-greg=kroah.com@vger.kernel.org Sat Mar 30 00:00:35 2024
+From: Alex Williamson
+Date: Fri, 29 Mar 2024 16:59:41 -0600
+Subject: vfio/platform: Create persistent IRQ handlers
+To: stable@vger.kernel.org
+Cc: Alex Williamson , sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian
+Message-ID: <20240329225944.3294388-6-alex.williamson@redhat.com>
+
+From: Alex Williamson
+
+[ Upstream commit 675daf435e9f8e5a5eab140a9864dfad6668b375 ]
+
+The vfio-platform SET_IRQS ioctl currently allows loopback triggering of
+an interrupt before a signaling eventfd has been configured by the user,
+which thereby allows a NULL pointer dereference.
+
+Rather than register the IRQ relative to a valid trigger, register all
+IRQs in a disabled state in the device open path. This allows mask
+operations on the IRQ to nest within the overall enable state governed
+by a valid eventfd signal. This decouples @masked, protected by the
+@locked spinlock from @trigger, protected via the @igate mutex.
+
+In doing so, it's guaranteed that changes to @trigger cannot race the
+IRQ handlers because the IRQ handler is synchronously disabled before
+modifying the trigger, and loopback triggering of the IRQ via ioctl is
+safe due to serialization with trigger changes via igate.
+
+For compatibility, request_irq() failures are maintained to be local to
+the SET_IRQS ioctl rather than a fatal error in the open device path.
+This allows, for example, a userspace driver with polling mode support
+to continue to work regardless of moving the request_irq() call site.
+This necessarily blocks all SET_IRQS access to the failed index.
+
+Cc: Eric Auger
+Cc:
+Fixes: 57f972e2b341 ("vfio/platform: trigger an interrupt via eventfd")
+Reviewed-by: Kevin Tian
+Reviewed-by: Eric Auger
+Link: https://lore.kernel.org/r/20240308230557.805580-7-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/platform/vfio_platform_irq.c | 101 ++++++++++++++++++++----------
+ 1 file changed, 68 insertions(+), 33 deletions(-)
+
+--- a/drivers/vfio/platform/vfio_platform_irq.c
++++ b/drivers/vfio/platform/vfio_platform_irq.c
+@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(
+ 	return 0;
+ }
+ 
++/*
++ * The trigger eventfd is guaranteed valid in the interrupt path
++ * and protected by the igate mutex when triggered via ioctl.
++ */
++static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
++{
++	if (likely(irq_ctx->trigger))
++		eventfd_signal(irq_ctx->trigger, 1);
++}
++
+ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ {
+ 	struct vfio_platform_irq *irq_ctx = dev_id;
+@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_h
+ 	spin_unlock_irqrestore(&irq_ctx->lock, flags);
+ 
+ 	if (ret == IRQ_HANDLED)
+-		eventfd_signal(irq_ctx->trigger, 1);
++		vfio_send_eventfd(irq_ctx);
+ 
+ 	return ret;
+ }
+@@ -164,22 +174,19 @@ static irqreturn_t vfio_irq_handler(int
+ {
+ 	struct vfio_platform_irq *irq_ctx = dev_id;
+ 
+-	eventfd_signal(irq_ctx->trigger, 1);
++	vfio_send_eventfd(irq_ctx);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+-			    int fd, irq_handler_t handler)
++			    int fd)
+ {
+ 	struct vfio_platform_irq *irq = &vdev->irqs[index];
+ 	struct eventfd_ctx *trigger;
+-	int ret;
+ 
+ 	if (irq->trigger) {
+-		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+-		free_irq(irq->hwirq, irq);
+-		kfree(irq->name);
++		disable_irq(irq->hwirq);
+ 		eventfd_ctx_put(irq->trigger);
+ 		irq->trigger = NULL;
+ 	}
+@@ -187,30 +194,20 @@ static int vfio_set_trigger(struct vfio_
+ 	if (fd < 0) /* Disable only */
+ 		return 0;
+ 
+-	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
+-			      irq->hwirq, vdev->name);
+-	if (!irq->name)
+-		return -ENOMEM;
+-
+ 	trigger = eventfd_ctx_fdget(fd);
+-	if (IS_ERR(trigger)) {
+-		kfree(irq->name);
++	if (IS_ERR(trigger))
+ 		return PTR_ERR(trigger);
+-	}
+ 
+ 	irq->trigger = trigger;
+ 
+-	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+-	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
+-	if (ret) {
+-		kfree(irq->name);
+-		eventfd_ctx_put(trigger);
+-		irq->trigger = NULL;
+-		return ret;
+-	}
+-
+-	if (!irq->masked)
+-		enable_irq(irq->hwirq);
++	/*
++	 * irq->masked effectively provides nested disables within the overall
++	 * enable relative to trigger. Specifically request_irq() is called
++	 * with NO_AUTOEN, therefore the IRQ is initially disabled. The user
++	 * may only further disable the IRQ with a MASK operation because
++	 * irq->masked is initially false.
++	 */
++	enable_irq(irq->hwirq);
+ 
+ 	return 0;
+ }
+@@ -229,7 +226,7 @@ static int vfio_platform_set_irq_trigger
+ 		handler = vfio_irq_handler;
+ 
+ 	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+-		return vfio_set_trigger(vdev, index, -1, handler);
++		return vfio_set_trigger(vdev, index, -1);
+ 
+ 	if (start != 0 || count != 1)
+ 		return -EINVAL;
+@@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger
+ 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		int32_t fd = *(int32_t *)data;
+ 
+-		return vfio_set_trigger(vdev, index, fd, handler);
++		return vfio_set_trigger(vdev, index, fd);
+ 	}
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+@@ -261,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct
+ 		    unsigned start, unsigned count, uint32_t flags,
+ 		    void *data) = NULL;
+ 
++	/*
++	 * For compatibility, errors from request_irq() are local to the
++	 * SET_IRQS path and reflected in the name pointer. This allows,
++	 * for example, polling mode fallback for an exclusive IRQ failure.
++	 */
++	if (IS_ERR(vdev->irqs[index].name))
++		return PTR_ERR(vdev->irqs[index].name);
++
+ 	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ 	case VFIO_IRQ_SET_ACTION_MASK:
+ 		func = vfio_platform_set_irq_mask;
+@@ -281,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct
+ 
+ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+ {
+-	int cnt = 0, i;
++	int cnt = 0, i, ret = 0;
+ 
+ 	while (vdev->get_irq(vdev, cnt) >= 0)
+ 		cnt++;
+@@ -292,29 +297,54 @@ int vfio_platform_irq_init(struct vfio_p
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		int hwirq = vdev->get_irq(vdev, i);
++		irq_handler_t handler = vfio_irq_handler;
+ 
+-		if (hwirq < 0)
++		if (hwirq < 0) {
++			ret = -EINVAL;
+ 			goto err;
++		}
+ 
+ 		spin_lock_init(&vdev->irqs[i].lock);
+ 
+ 		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
+ 
+-		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
++		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
+ 			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
+ 						| VFIO_IRQ_INFO_AUTOMASKED;
++			handler = vfio_automasked_irq_handler;
++		}
+ 
+ 		vdev->irqs[i].count = 1;
+ 		vdev->irqs[i].hwirq = hwirq;
+ 		vdev->irqs[i].masked = false;
++		vdev->irqs[i].name = kasprintf(GFP_KERNEL,
++					       "vfio-irq[%d](%s)", hwirq,
++					       vdev->name);
++		if (!vdev->irqs[i].name) {
++			ret = -ENOMEM;
++			goto err;
++		}
++
++		ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
++				  vdev->irqs[i].name, &vdev->irqs[i]);
++		if (ret) {
++			kfree(vdev->irqs[i].name);
++			vdev->irqs[i].name = ERR_PTR(ret);
++		}
+ 	}
+ 
+ 	vdev->num_irqs = cnt;
+ 
+ 	return 0;
+ err:
++	for (--i; i >= 0; i--) {
++		if (!IS_ERR(vdev->irqs[i].name)) {
++			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++			kfree(vdev->irqs[i].name);
++		}
++	}
+ 	kfree(vdev->irqs);
+-	return -EINVAL;
++	return ret;
+ }
+ 
+ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+@@ -324,7 +354,12 @@ void vfio_platform_irq_cleanup(struct vf
+ 	for (i = 0; i < vdev->num_irqs; i++) {
+ 		vfio_virqfd_disable(&vdev->irqs[i].mask);
+ 		vfio_virqfd_disable(&vdev->irqs[i].unmask);
+-		vfio_set_trigger(vdev, i, -1, NULL);
++		if (!IS_ERR(vdev->irqs[i].name)) {
++			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++			if (vdev->irqs[i].trigger)
++				eventfd_ctx_put(vdev->irqs[i].trigger);
++			kfree(vdev->irqs[i].name);
++		}
+ 	}
+ 
+ 	vdev->num_irqs = 0;
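Taken together, the two "persistent handler" patches in this batch converge on the same lifecycle, sketched here with hypothetical demo_* names. The PCI variant swaps the trigger with READ_ONCE()/WRITE_ONCE() plus synchronize_irq(), as shown; the platform variant instead brackets the swap with disable_irq()/enable_irq():

#include <linux/eventfd.h>
#include <linux/interrupt.h>

struct demo_irq {
	int hwirq;
	struct eventfd_ctx *trigger;	/* swapped at runtime; handler persists */
};

static irqreturn_t demo_handler(int irq, void *arg)
{
	struct demo_irq *ctx = arg;
	struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);

	if (likely(trigger))		/* tolerate a concurrent deconfigure */
		eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

/* called under the ioctl mutex (igate); the IRQ stays requested throughout */
static void demo_set_trigger(struct demo_irq *ctx, struct eventfd_ctx *new_ctx)
{
	struct eventfd_ctx *old = ctx->trigger;

	WRITE_ONCE(ctx->trigger, new_ctx);
	if (old) {
		synchronize_irq(ctx->hwirq);	/* drain in-flight handlers */
		eventfd_ctx_put(old);
	}
}

The handler is requested once, with IRQF_NO_AUTOEN, for the life of the device, so there is no window in which an interrupt can fire against a freed name or a dangling eventfd, which is exactly the class of bug this series closes.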