--- /dev/null
+From a8c22921a08a8d50b10fc836cff4348d5dde17e2 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 2 Apr 2024 08:28:04 -0600
+Subject: io_uring: ensure '0' is returned on file registration success
+
+From: Jens Axboe <axboe@kernel.dk>
+
+A previous backport mistakenly removed code that cleared 'ret' to zero
+after the SCM logging was performed. Fix up the return value so we
+don't return an errant error on fixed file registration success.
+
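+For illustration, a minimal userspace check of the user-visible symptom
+(a sketch assuming liburing is available; the helper returns 0 on
+success and -errno on failure):
+
+  #include <liburing.h>
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          struct io_uring ring;
+          int fd, ret;
+
+          if (io_uring_queue_init(8, &ring, 0))
+                  return 1;
+
+          fd = open("/dev/null", O_RDONLY);
+          if (fd < 0)
+                  return 1;
+
+          /* With this fix applied, success is reported as exactly 0. */
+          ret = io_uring_register_files(&ring, &fd, 1);
+          printf("io_uring_register_files() = %d\n", ret);
+
+          close(fd);
+          io_uring_queue_exit(&ring);
+          return ret != 0;
+  }
+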
+Fixes: a6771f343af9 ("io_uring: drop any code related to SCM_RIGHTS")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -8247,7 +8247,7 @@ static int io_sqe_files_register(struct
+ }
+
+ io_rsrc_node_switch(ctx, NULL);
+- return ret;
++ return 0;
+ out_fput:
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ file = io_file_from_index(ctx, i);
--- /dev/null
+From c567f2948f57bdc03ed03403ae0234085f376b7d Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Mon, 25 Mar 2024 11:47:51 +0100
+Subject: Revert "x86/mm/ident_map: Use gbpages only where full GB page should be mapped."
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit c567f2948f57bdc03ed03403ae0234085f376b7d upstream.
+
+This reverts commit d794734c9bbfe22f86686dc2909c25f5ffe1a572.
+
+While the original change tried to fix a bug, it also unintentionally
+broke existing systems; see the regressions reported at:
+
+ https://lore.kernel.org/all/3a1b9909-45ac-4f97-ad68-d16ef1ce99db@pavinjoseph.com/
+
+Since d794734c9bbf was also marked for -stable, let's back it out before
+causing more damage.
+
+Note that due to another upstream change the revert was not 100% automatic:
+
+ 0a845e0f6348 mm/treewide: replace pud_large() with pud_leaf()
+
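+Concretely, the check being removed was converted upstream by that
+commit, so the automatic revert did not apply cleanly there; this
+stable tree still uses the old helper name, as the hunk below shows:
+
+  /* upstream, after 0a845e0f6348: */
+  if (pud_leaf(*pud))
+          continue;
+
+  /* this tree, what the hunk below removes: */
+  if (pud_large(*pud))
+          continue;
+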
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Russ Anderson <rja@hpe.com>
+Cc: Steve Wahl <steve.wahl@hpe.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/3a1b9909-45ac-4f97-ad68-d16ef1ce99db@pavinjoseph.com/
+Fixes: d794734c9bbf ("x86/mm/ident_map: Use gbpages only where full GB page should be mapped.")
+Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ident_map.c | 23 +++++------------------
+ 1 file changed, 5 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_map
+ for (; addr < end; addr = next) {
+ pud_t *pud = pud_page + pud_index(addr);
+ pmd_t *pmd;
+- bool use_gbpage;
+
+ next = (addr & PUD_MASK) + PUD_SIZE;
+ if (next > end)
+ next = end;
+
+- /* if this is already a gbpage, this portion is already mapped */
+- if (pud_large(*pud))
+- continue;
+-
+- /* Is using a gbpage allowed? */
+- use_gbpage = info->direct_gbpages;
+-
+- /* Don't use gbpage if it maps more than the requested region. */
+- /* at the begining: */
+- use_gbpage &= ((addr & ~PUD_MASK) == 0);
+- /* ... or at the end: */
+- use_gbpage &= ((next & ~PUD_MASK) == 0);
+-
+- /* Never overwrite existing mappings */
+- use_gbpage &= !pud_present(*pud);
+-
+- if (use_gbpage) {
++ if (info->direct_gbpages) {
+ pud_t pudval;
+
++ if (pud_present(*pud))
++ continue;
++
++ addr &= PUD_MASK;
+ pudval = __pud((addr - info->offset) | info->page_flag);
+ set_pud(pud, pudval);
+ continue;
bluetooth-fix-toctou-in-hci-debugfs-implementation.patch
netfilter-nf_tables-disallow-timeout-for-anonymous-sets.patch
net-rds-fix-possible-cp-null-dereference.patch
+vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
+vfio-pci-lock-external-intx-masking-ops.patch
+vfio-introduce-interface-to-flush-virqfd-inject-workqueue.patch
+vfio-pci-create-persistent-intx-handler.patch
+vfio-platform-create-persistent-irq-handlers.patch
+vfio-fsl-mc-block-calling-interrupt-handler-without-trigger.patch
+io_uring-ensure-0-is-returned-on-file-registration-success.patch
+revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch
--- /dev/null
+From stable+bounces-35129-greg=kroah.com@vger.kernel.org Mon Apr 1 18:54:07 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:53:00 -0600
+Subject: vfio/fsl-mc: Block calling interrupt handler without trigger
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Diana Craciun <diana.craciun@oss.nxp.com>, Kevin Tian <kevin.tian@intel.com>
+Message-ID: <20240401165302.3699643-7-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 7447d911af699a15f8d050dfcb7c680a86f87012 ]
+
+The eventfd_ctx trigger pointer of the vfio_fsl_mc_irq object is
+initially NULL and may become NULL if the user sets the trigger
+eventfd to -1. The interrupt handler itself is guaranteed that
+trigger is always valid between request_irq() and free_irq(), but
+the loopback testing mechanisms that invoke the handler function
+need to test the trigger. The triggering and setting ioctl paths
+both make use of igate and are therefore mutually exclusive.
+
+The vfio-fsl-mc driver does not make use of irqfds, nor does it
+support any sort of masking operations, therefore unlike vfio-pci
+and vfio-platform, the flow can remain essentially unchanged.
+
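+For illustration, a hypothetical userspace sequence exercising the
+fixed paths via the generic VFIO uapi (<linux/vfio.h>); device_fd and
+index are assumed to refer to an open vfio-fsl-mc device and a valid
+IRQ index:
+
+  /* Clear the trigger eventfd, then request a loopback trigger.
+   * Previously this invoked the handler with a NULL eventfd_ctx;
+   * now it is a safe no-op. */
+  char buf[sizeof(struct vfio_irq_set) + sizeof(__s32)];
+  struct vfio_irq_set *set = (void *)buf;
+
+  set->argsz = sizeof(buf);
+  set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+  set->index = index;
+  set->start = 0;
+  set->count = 1;
+  *(__s32 *)set->data = -1;            /* -1 deconfigures the trigger */
+  ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
+
+  set->argsz = sizeof(*set);
+  set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+  ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
+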
+Cc: Diana Craciun <diana.craciun@oss.nxp.com>
+Cc: <stable@vger.kernel.org>
+Fixes: cc0ee20bd969 ("vfio/fsl-mc: trigger an interrupt via eventfd")
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-8-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+@@ -142,13 +142,14 @@ static int vfio_fsl_mc_set_irq_trigger(s
+ irq = &vdev->mc_irqs[index];
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_fsl_mc_irq_handler(hwirq, irq);
++ if (irq->trigger)
++ eventfd_signal(irq->trigger, 1);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ u8 trigger = *(u8 *)data;
+
+- if (trigger)
+- vfio_fsl_mc_irq_handler(hwirq, irq);
++ if (trigger && irq->trigger)
++ eventfd_signal(irq->trigger, 1);
+ }
+
+ return 0;
--- /dev/null
+From stable+bounces-35125-greg=kroah.com@vger.kernel.org Mon Apr 1 18:54:02 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:52:57 -0600
+Subject: vfio: Introduce interface to flush virqfd inject workqueue
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian <kevin.tian@intel.com>, Reinette Chatre <reinette.chatre@intel.com>
+Message-ID: <20240401165302.3699643-4-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit b620ecbd17a03cacd06f014a5d3f3a11285ce053 ]
+
+In order to synchronize changes that can affect the thread callback,
+introduce an interface to force a flush of the inject workqueue. The
+irqfd pointer is only valid under spinlock, but the workqueue cannot
+be flushed under spinlock. Therefore the flush work for the irqfd is
+queued under spinlock. The vfio_irqfd_cleanup_wq workqueue is re-used
+for queuing this work such that flushing the workqueue is also ordered
+relative to shutdown.
+
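+For example, the subsequent vfio-pci change uses the new interface to
+synchronize an eventfd trigger swap against the unmask irqfd's thread
+callback:
+
+  vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
+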
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-4-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/virqfd.c | 21 +++++++++++++++++++++
+ include/linux/vfio.h | 2 ++
+ 2 files changed, 23 insertions(+)
+
+--- a/drivers/vfio/virqfd.c
++++ b/drivers/vfio/virqfd.c
+@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_st
+ virqfd->thread(virqfd->opaque, virqfd->data);
+ }
+
++static void virqfd_flush_inject(struct work_struct *work)
++{
++ struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
++
++ flush_work(&virqfd->inject);
++}
++
+ int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,
+
+ INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ INIT_WORK(&virqfd->inject, virqfd_inject);
++ INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
+
+ irqfd = fdget(fd);
+ if (!irqfd.file) {
+@@ -214,6 +222,19 @@ void vfio_virqfd_disable(struct virqfd *
+ }
+ EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&virqfd_lock, flags);
++ if (*pvirqfd && (*pvirqfd)->thread)
++ queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
++ spin_unlock_irqrestore(&virqfd_lock, flags);
++
++ flush_workqueue(vfio_irqfd_cleanup_wq);
++}
++EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
++
+ module_init(vfio_virqfd_init);
+ module_exit(vfio_virqfd_exit);
+
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -221,6 +221,7 @@ struct virqfd {
+ wait_queue_entry_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
++ struct work_struct flush_inject;
+ struct virqfd **pvirqfd;
+ };
+
+@@ -229,5 +230,6 @@ extern int vfio_virqfd_enable(void *opaq
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd);
+ extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
+
+ #endif /* VFIO_H */
--- /dev/null
+From stable+bounces-35126-greg=kroah.com@vger.kernel.org Mon Apr 1 18:54:05 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:52:58 -0600
+Subject: vfio/pci: Create persistent INTx handler
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Reinette Chatre <reinette.chatre@intel.com>, Kevin Tian <kevin.tian@intel.com>
+Message-ID: <20240401165302.3699643-5-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 18c198c96a815c962adc2b9b77909eec0be7df4d ]
+
+A vulnerability exists where the eventfd for INTx signaling can be
+deconfigured, which unregisters the IRQ handler but still allows
+eventfds to be signaled with a NULL context through the SET_IRQS ioctl
+or through unmask irqfd if the device interrupt is pending.
+
+Ideally this could be solved with some additional locking; the igate
+mutex serializes the ioctl and config space accesses, and the interrupt
+handler is unregistered relative to the trigger, but the irqfd path
+runs asynchronously to those. The igate mutex cannot be acquired from the
+atomic context of the eventfd wake function. Disabling the irqfd
+relative to the eventfd registration is potentially incompatible with
+existing userspace.
+
+As a result, the solution implemented here moves configuration of the
+INTx interrupt handler to track the lifetime of the INTx context object
+and irq_type configuration, rather than registration of a particular
+trigger eventfd. Synchronization is added between the ioctl path and
+eventfd_signal() wrapper such that the eventfd trigger can be
+dynamically updated relative to in-flight interrupts or irqfd callbacks.
+
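+In outline, the trigger update protocol becomes (condensed from the
+diff below; the update runs under igate):
+
+  old = vdev->ctx[0].trigger;
+  WRITE_ONCE(vdev->ctx[0].trigger, trigger);
+  if (old) {
+          synchronize_irq(pdev->irq);     /* in-flight hard handler */
+          vfio_virqfd_flush_thread(&vdev->ctx[0].unmask); /* irqfd */
+          eventfd_ctx_put(old);
+  }
+
+while the signaling path reads the trigger exactly once:
+
+  trigger = READ_ONCE(vdev->ctx[0].trigger);
+  if (likely(trigger))
+          eventfd_signal(trigger, 1);
+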
+Cc: <stable@vger.kernel.org>
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reported-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-5-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 149 ++++++++++++++++++++------------------
+ 1 file changed, 82 insertions(+), 67 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -29,8 +29,13 @@ static void vfio_send_intx_eventfd(void
+ {
+ struct vfio_pci_device *vdev = opaque;
+
+- if (likely(is_intx(vdev) && !vdev->virq_disabled))
+- eventfd_signal(vdev->ctx[0].trigger, 1);
++ if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
++ struct eventfd_ctx *trigger;
++
++ trigger = READ_ONCE(vdev->ctx[0].trigger);
++ if (likely(trigger))
++ eventfd_signal(trigger, 1);
++ }
+ }
+
+ static void __vfio_pci_intx_mask(struct vfio_pci_device *vdev)
+@@ -157,98 +162,104 @@ static irqreturn_t vfio_intx_handler(int
+ return ret;
+ }
+
+-static int vfio_intx_enable(struct vfio_pci_device *vdev)
++static int vfio_intx_enable(struct vfio_pci_device *vdev,
++ struct eventfd_ctx *trigger)
+ {
++ struct pci_dev *pdev = vdev->pdev;
++ unsigned long irqflags;
++ char *name;
++ int ret;
++
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+- if (!vdev->pdev->irq)
++ if (!pdev->irq)
+ return -ENODEV;
+
++ name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", pci_name(pdev));
++ if (!name)
++ return -ENOMEM;
++
+ vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+ vdev->num_ctx = 1;
+
++ vdev->ctx[0].name = name;
++ vdev->ctx[0].trigger = trigger;
++
+ /*
+- * If the virtual interrupt is masked, restore it. Devices
+- * supporting DisINTx can be masked at the hardware level
+- * here, non-PCI-2.3 devices will have to wait until the
+- * interrupt is enabled.
++ * Fill the initial masked state based on virq_disabled. After
++ * enable, changing the DisINTx bit in vconfig directly changes INTx
++ * masking. igate prevents races during setup; once running, masked
++ * is protected via irqlock.
++ *
++ * Devices supporting DisINTx also reflect the current mask state in
++ * the physical DisINTx bit, which is not affected during IRQ setup.
++ *
++ * Devices without DisINTx support require an exclusive interrupt.
++ * IRQ masking is performed at the IRQ chip. Again, igate protects
++ * against races during setup and IRQ handlers and irqfds are not
++ * yet active, therefore masked is stable and can be used to
++ * conditionally auto-enable the IRQ.
++ *
++ * irq_type must be stable while the IRQ handler is registered,
++ * therefore it must be set before request_irq().
+ */
+ vdev->ctx[0].masked = vdev->virq_disabled;
+- if (vdev->pci_2_3)
+- pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++ if (vdev->pci_2_3) {
++ pci_intx(pdev, !vdev->ctx[0].masked);
++ irqflags = IRQF_SHARED;
++ } else {
++ irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0;
++ }
+
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
++ ret = request_irq(pdev->irq, vfio_intx_handler,
++ irqflags, vdev->ctx[0].name, vdev);
++ if (ret) {
++ vdev->irq_type = VFIO_PCI_NUM_IRQS;
++ kfree(name);
++ vdev->num_ctx = 0;
++ kfree(vdev->ctx);
++ return ret;
++ }
++
+ return 0;
+ }
+
+-static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
++static int vfio_intx_set_signal(struct vfio_pci_device *vdev,
++ struct eventfd_ctx *trigger)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+- unsigned long irqflags = IRQF_SHARED;
+- struct eventfd_ctx *trigger;
+- unsigned long flags;
+- int ret;
+-
+- if (vdev->ctx[0].trigger) {
+- free_irq(pdev->irq, vdev);
+- kfree(vdev->ctx[0].name);
+- eventfd_ctx_put(vdev->ctx[0].trigger);
+- vdev->ctx[0].trigger = NULL;
+- }
+-
+- if (fd < 0) /* Disable only */
+- return 0;
+-
+- vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
+- pci_name(pdev));
+- if (!vdev->ctx[0].name)
+- return -ENOMEM;
+-
+- trigger = eventfd_ctx_fdget(fd);
+- if (IS_ERR(trigger)) {
+- kfree(vdev->ctx[0].name);
+- return PTR_ERR(trigger);
+- }
++ struct eventfd_ctx *old;
+
+- vdev->ctx[0].trigger = trigger;
++ old = vdev->ctx[0].trigger;
+
+- /*
+- * Devices without DisINTx support require an exclusive interrupt,
+- * IRQ masking is performed at the IRQ chip. The masked status is
+- * protected by vdev->irqlock. Setup the IRQ without auto-enable and
+- * unmask as necessary below under lock. DisINTx is unmodified by
+- * the IRQ configuration and may therefore use auto-enable.
+- */
+- if (!vdev->pci_2_3)
+- irqflags = IRQF_NO_AUTOEN;
++ WRITE_ONCE(vdev->ctx[0].trigger, trigger);
+
+- ret = request_irq(pdev->irq, vfio_intx_handler,
+- irqflags, vdev->ctx[0].name, vdev);
+- if (ret) {
+- vdev->ctx[0].trigger = NULL;
+- kfree(vdev->ctx[0].name);
+- eventfd_ctx_put(trigger);
+- return ret;
++ /* Releasing an old ctx requires synchronizing in-flight users */
++ if (old) {
++ synchronize_irq(pdev->irq);
++ vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
++ eventfd_ctx_put(old);
+ }
+
+- spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
+- enable_irq(pdev->irq);
+- spin_unlock_irqrestore(&vdev->irqlock, flags);
+-
+ return 0;
+ }
+
+ static void vfio_intx_disable(struct vfio_pci_device *vdev)
+ {
++ struct pci_dev *pdev = vdev->pdev;
++
+ vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ vfio_virqfd_disable(&vdev->ctx[0].mask);
+- vfio_intx_set_signal(vdev, -1);
++ free_irq(pdev->irq, vdev);
++ if (vdev->ctx[0].trigger)
++ eventfd_ctx_put(vdev->ctx[0].trigger);
++ kfree(vdev->ctx[0].name);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+ kfree(vdev->ctx);
+@@ -498,19 +509,23 @@ static int vfio_pci_set_intx_trigger(str
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ struct eventfd_ctx *trigger = NULL;
+ int32_t fd = *(int32_t *)data;
+ int ret;
+
++ if (fd >= 0) {
++ trigger = eventfd_ctx_fdget(fd);
++ if (IS_ERR(trigger))
++ return PTR_ERR(trigger);
++ }
++
+ if (is_intx(vdev))
+- return vfio_intx_set_signal(vdev, fd);
++ ret = vfio_intx_set_signal(vdev, trigger);
++ else
++ ret = vfio_intx_enable(vdev, trigger);
+
+- ret = vfio_intx_enable(vdev);
+- if (ret)
+- return ret;
+-
+- ret = vfio_intx_set_signal(vdev, fd);
+- if (ret)
+- vfio_intx_disable(vdev);
++ if (ret && trigger)
++ eventfd_ctx_put(trigger);
+
+ return ret;
+ }
--- /dev/null
+From stable+bounces-35122-greg=kroah.com@vger.kernel.org Mon Apr 1 18:53:59 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:52:55 -0600
+Subject: vfio/pci: Disable auto-enable of exclusive INTx IRQ
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian <kevin.tian@intel.com>
+Message-ID: <20240401165302.3699643-2-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit fe9a7082684eb059b925c535682e68c34d487d43 ]
+
+Currently for devices requiring masking at the irqchip for INTx, i.e.
+devices without DisINTx support, the IRQ is enabled in request_irq()
+and subsequently disabled as necessary to align with the masked status
+flag. This presents a window where the interrupt could fire between
+these events, resulting in the IRQ incrementing the disable depth twice.
+This would be unrecoverable for a user since the masked flag prevents
+nested enables through vfio.
+
+Instead, invert the logic using IRQF_NO_AUTOEN such that exclusive INTx
+is never auto-enabled, then unmask as required.
+
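+In outline, the fixed setup ordering is (condensed from the diff
+below; error handling elided):
+
+  if (!vdev->pci_2_3)
+          irqflags = IRQF_NO_AUTOEN;      /* never starts enabled */
+
+  ret = request_irq(pdev->irq, vfio_intx_handler,
+                    irqflags, vdev->ctx[0].name, vdev);
+  ...
+  spin_lock_irqsave(&vdev->irqlock, flags);
+  if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
+          enable_irq(pdev->irq);          /* unmask, don't re-disable */
+  spin_unlock_irqrestore(&vdev->irqlock, flags);
+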
+Cc: <stable@vger.kernel.org>
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-2-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -199,8 +199,15 @@ static int vfio_intx_set_signal(struct v
+
+ vdev->ctx[0].trigger = trigger;
+
++ /*
++ * Devices without DisINTx support require an exclusive interrupt,
++ * IRQ masking is performed at the IRQ chip. The masked status is
++ * protected by vdev->irqlock. Setup the IRQ without auto-enable and
++ * unmask as necessary below under lock. DisINTx is unmodified by
++ * the IRQ configuration and may therefore use auto-enable.
++ */
+ if (!vdev->pci_2_3)
+- irqflags = 0;
++ irqflags = IRQF_NO_AUTOEN;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, vdev->ctx[0].name, vdev);
+@@ -211,13 +218,9 @@ static int vfio_intx_set_signal(struct v
+ return ret;
+ }
+
+- /*
+- * INTx disable will stick across the new irq setup,
+- * disable_irq won't.
+- */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+- disable_irq_nosync(pdev->irq);
++ if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
++ enable_irq(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return 0;
--- /dev/null
+From stable+bounces-35123-greg=kroah.com@vger.kernel.org Mon Apr 1 18:54:00 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:52:56 -0600
+Subject: vfio/pci: Lock external INTx masking ops
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Reinette Chatre <reinette.chatre@intel.com>, Kevin Tian <kevin.tian@intel.com>
+Message-ID: <20240401165302.3699643-3-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 810cd4bb53456d0503cc4e7934e063835152c1b7 ]
+
+Mask operations through config space changes to DisINTx may race INTx
+configuration changes via ioctl. Create wrappers that add locking for
+paths outside of the core interrupt code.
+
+In particular, irq_type is updated holding igate, therefore testing
+is_intx() requires holding igate. For example clearing DisINTx from
+config space can otherwise race changes of the interrupt configuration.
+
+This aligns interfaces which may trigger the INTx eventfd into two
+camps, one side serialized by igate and the other only enabled while
+INTx is configured. A subsequent patch introduces synchronization for
+the latter flows.
+
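+The wrapper pattern, condensed from the diff below:
+
+  void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
+  {
+          mutex_lock(&vdev->igate);
+          __vfio_pci_intx_mask(vdev);
+          mutex_unlock(&vdev->igate);
+  }
+
+while callers already holding igate (the SET_IRQS ioctl paths) use the
+unlocked __vfio_pci_intx_mask()/__vfio_pci_intx_unmask() variants
+directly.
+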
+Cc: <stable@vger.kernel.org>
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reported-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-3-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -33,11 +33,13 @@ static void vfio_send_intx_eventfd(void
+ eventfd_signal(vdev->ctx[0].trigger, 1);
+ }
+
+-void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
++static void __vfio_pci_intx_mask(struct vfio_pci_device *vdev)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long flags;
+
++ lockdep_assert_held(&vdev->igate);
++
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+@@ -65,6 +67,13 @@ void vfio_pci_intx_mask(struct vfio_pci_
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+ }
+
++void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
++{
++ mutex_lock(&vdev->igate);
++ __vfio_pci_intx_mask(vdev);
++ mutex_unlock(&vdev->igate);
++}
++
+ /*
+ * If this is triggered by an eventfd, we can't call eventfd_signal
+ * or else we'll deadlock on the eventfd wait queue. Return >0 when
+@@ -107,12 +116,21 @@ static int vfio_pci_intx_unmask_handler(
+ return ret;
+ }
+
+-void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
++static void __vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
+ {
++ lockdep_assert_held(&vdev->igate);
++
+ if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ vfio_send_intx_eventfd(vdev, NULL);
+ }
+
++void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
++{
++ mutex_lock(&vdev->igate);
++ __vfio_pci_intx_unmask(vdev);
++ mutex_unlock(&vdev->igate);
++}
++
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ struct vfio_pci_device *vdev = dev_id;
+@@ -428,11 +446,11 @@ static int vfio_pci_set_intx_unmask(stru
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+ if (unmask)
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+ if (fd >= 0)
+@@ -455,11 +473,11 @@ static int vfio_pci_set_intx_mask(struct
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+ if (mask)
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ return -ENOTTY; /* XXX implement me */
+ }
--- /dev/null
+From stable+bounces-35128-greg=kroah.com@vger.kernel.org Mon Apr 1 18:54:07 2024
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Mon, 1 Apr 2024 10:52:59 -0600
+Subject: vfio/platform: Create persistent IRQ handlers
+To: stable@vger.kernel.org
+Cc: Alex Williamson <alex.williamson@redhat.com>, sashal@kernel.org, gregkh@linuxfoundation.org, eric.auger@redhat.com, Kevin Tian <kevin.tian@intel.com>
+Message-ID: <20240401165302.3699643-6-alex.williamson@redhat.com>
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 675daf435e9f8e5a5eab140a9864dfad6668b375 ]
+
+The vfio-platform SET_IRQS ioctl currently allows loopback triggering of
+an interrupt before a signaling eventfd has been configured by the user,
+which thereby allows a NULL pointer dereference.
+
+Rather than register the IRQ relative to a valid trigger, register all
+IRQs in a disabled state in the device open path. This allows mask
+operations on the IRQ to nest within the overall enable state governed
+by a valid eventfd signal. This decouples @masked, protected by the
+@locked spinlock, from @trigger, protected via the @igate mutex.
+
+In doing so, it's guaranteed that changes to @trigger cannot race the
+IRQ handlers because the IRQ handler is synchronously disabled before
+modifying the trigger, and loopback triggering of the IRQ via ioctl is
+safe due to serialization with trigger changes via igate.
+
+For compatibility, request_irq() failures remain local to the SET_IRQS
+ioctl rather than becoming a fatal error in the open device path.
+This allows, for example, a userspace driver with polling mode support
+to continue to work regardless of moving the request_irq() call site.
+This necessarily blocks all SET_IRQS access to the failed index.
+
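+Condensed from the diff below, the open-path registration and its
+failure handling:
+
+  ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
+                    vdev->irqs[i].name, &vdev->irqs[i]);
+  if (ret) {
+          kfree(vdev->irqs[i].name);
+          vdev->irqs[i].name = ERR_PTR(ret); /* returned by SET_IRQS */
+  }
+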
+Cc: Eric Auger <eric.auger@redhat.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 57f972e2b341 ("vfio/platform: trigger an interrupt via eventfd")
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-7-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/platform/vfio_platform_irq.c | 101 ++++++++++++++++++++----------
+ 1 file changed, 68 insertions(+), 33 deletions(-)
+
+--- a/drivers/vfio/platform/vfio_platform_irq.c
++++ b/drivers/vfio/platform/vfio_platform_irq.c
+@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(
+ return 0;
+ }
+
++/*
++ * The trigger eventfd is guaranteed valid in the interrupt path
++ * and protected by the igate mutex when triggered via ioctl.
++ */
++static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
++{
++ if (likely(irq_ctx->trigger))
++ eventfd_signal(irq_ctx->trigger, 1);
++}
++
+ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ {
+ struct vfio_platform_irq *irq_ctx = dev_id;
+@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_h
+ spin_unlock_irqrestore(&irq_ctx->lock, flags);
+
+ if (ret == IRQ_HANDLED)
+- eventfd_signal(irq_ctx->trigger, 1);
++ vfio_send_eventfd(irq_ctx);
+
+ return ret;
+ }
+@@ -164,22 +174,19 @@ static irqreturn_t vfio_irq_handler(int
+ {
+ struct vfio_platform_irq *irq_ctx = dev_id;
+
+- eventfd_signal(irq_ctx->trigger, 1);
++ vfio_send_eventfd(irq_ctx);
+
+ return IRQ_HANDLED;
+ }
+
+ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+- int fd, irq_handler_t handler)
++ int fd)
+ {
+ struct vfio_platform_irq *irq = &vdev->irqs[index];
+ struct eventfd_ctx *trigger;
+- int ret;
+
+ if (irq->trigger) {
+- irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+- free_irq(irq->hwirq, irq);
+- kfree(irq->name);
++ disable_irq(irq->hwirq);
+ eventfd_ctx_put(irq->trigger);
+ irq->trigger = NULL;
+ }
+@@ -187,30 +194,20 @@ static int vfio_set_trigger(struct vfio_
+ if (fd < 0) /* Disable only */
+ return 0;
+
+- irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
+- irq->hwirq, vdev->name);
+- if (!irq->name)
+- return -ENOMEM;
+-
+ trigger = eventfd_ctx_fdget(fd);
+- if (IS_ERR(trigger)) {
+- kfree(irq->name);
++ if (IS_ERR(trigger))
+ return PTR_ERR(trigger);
+- }
+
+ irq->trigger = trigger;
+
+- irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+- ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
+- if (ret) {
+- kfree(irq->name);
+- eventfd_ctx_put(trigger);
+- irq->trigger = NULL;
+- return ret;
+- }
+-
+- if (!irq->masked)
+- enable_irq(irq->hwirq);
++ /*
++ * irq->masked effectively provides nested disables within the overall
++ * enable relative to trigger. Specifically request_irq() is called
++ * with NO_AUTOEN, therefore the IRQ is initially disabled. The user
++ * may only further disable the IRQ with a MASK operation because
++ * irq->masked is initially false.
++ */
++ enable_irq(irq->hwirq);
+
+ return 0;
+ }
+@@ -229,7 +226,7 @@ static int vfio_platform_set_irq_trigger
+ handler = vfio_irq_handler;
+
+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+- return vfio_set_trigger(vdev, index, -1, handler);
++ return vfio_set_trigger(vdev, index, -1);
+
+ if (start != 0 || count != 1)
+ return -EINVAL;
+@@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+
+- return vfio_set_trigger(vdev, index, fd, handler);
++ return vfio_set_trigger(vdev, index, fd);
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+@@ -261,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
+
++ /*
++ * For compatibility, errors from request_irq() are local to the
++ * SET_IRQS path and reflected in the name pointer. This allows,
++ * for example, polling mode fallback for an exclusive IRQ failure.
++ */
++ if (IS_ERR(vdev->irqs[index].name))
++ return PTR_ERR(vdev->irqs[index].name);
++
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = vfio_platform_set_irq_mask;
+@@ -281,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct
+
+ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+ {
+- int cnt = 0, i;
++ int cnt = 0, i, ret = 0;
+
+ while (vdev->get_irq(vdev, cnt) >= 0)
+ cnt++;
+@@ -292,29 +297,54 @@ int vfio_platform_irq_init(struct vfio_p
+
+ for (i = 0; i < cnt; i++) {
+ int hwirq = vdev->get_irq(vdev, i);
++ irq_handler_t handler = vfio_irq_handler;
+
+- if (hwirq < 0)
++ if (hwirq < 0) {
++ ret = -EINVAL;
+ goto err;
++ }
+
+ spin_lock_init(&vdev->irqs[i].lock);
+
+ vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
+
+- if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
++ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
+ vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
+ | VFIO_IRQ_INFO_AUTOMASKED;
++ handler = vfio_automasked_irq_handler;
++ }
+
+ vdev->irqs[i].count = 1;
+ vdev->irqs[i].hwirq = hwirq;
+ vdev->irqs[i].masked = false;
++ vdev->irqs[i].name = kasprintf(GFP_KERNEL,
++ "vfio-irq[%d](%s)", hwirq,
++ vdev->name);
++ if (!vdev->irqs[i].name) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
++ vdev->irqs[i].name, &vdev->irqs[i]);
++ if (ret) {
++ kfree(vdev->irqs[i].name);
++ vdev->irqs[i].name = ERR_PTR(ret);
++ }
+ }
+
+ vdev->num_irqs = cnt;
+
+ return 0;
+ err:
++ for (--i; i >= 0; i--) {
++ if (!IS_ERR(vdev->irqs[i].name)) {
++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++ kfree(vdev->irqs[i].name);
++ }
++ }
+ kfree(vdev->irqs);
+- return -EINVAL;
++ return ret;
+ }
+
+ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+@@ -324,7 +354,12 @@ void vfio_platform_irq_cleanup(struct vf
+ for (i = 0; i < vdev->num_irqs; i++) {
+ vfio_virqfd_disable(&vdev->irqs[i].mask);
+ vfio_virqfd_disable(&vdev->irqs[i].unmask);
+- vfio_set_trigger(vdev, i, -1, NULL);
++ if (!IS_ERR(vdev->irqs[i].name)) {
++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++ if (vdev->irqs[i].trigger)
++ eventfd_ctx_put(vdev->irqs[i].trigger);
++ kfree(vdev->irqs[i].name);
++ }
+ }
+
+ vdev->num_irqs = 0;