PCI/MSI: Switch MSI descriptor locking to guard()
author    Thomas Gleixner <tglx@linutronix.de>
          Thu, 13 Mar 2025 13:03:44 +0000 (14:03 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 13 Mar 2025 17:58:00 +0000 (18:58 +0100)
Convert the code to use the new guard(msi_descs_lock).

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/all/20250313130321.695027112@linutronix.de
drivers/pci/msi/api.c
drivers/pci/msi/msi.c
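
Background on the pattern (not part of the change itself): the conversion relies on the kernel's scope-based cleanup helpers from <linux/cleanup.h>. The sketch below is purely illustrative; the example_lock guard class, example_dev structure and example_setup() function are hypothetical. It shows how a guard class defined with DEFINE_GUARD() replaces explicit lock/unlock pairs, and how an allocation annotated with __free(kfree) is released automatically on every return path.

/* Illustrative sketch only -- not part of this commit. Assumes the
 * scope-based cleanup helpers from <linux/cleanup.h>; the names
 * example_lock, example_dev and example_setup() are hypothetical.
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_dev {
	struct mutex lock;
};

/* Entering the scope acquires the lock; every exit path releases it. */
DEFINE_GUARD(example_lock, struct example_dev *,
	     mutex_lock(&_T->lock), mutex_unlock(&_T->lock));

static int example_setup(struct example_dev *dev, int nvec)
{
	/* Freed with kfree() automatically when 'masks' goes out of scope */
	int *masks __free(kfree) = kcalloc(nvec, sizeof(*masks), GFP_KERNEL);

	if (!masks)
		return -ENOMEM;

	guard(example_lock)(dev);	/* replaces explicit lock()/unlock() pairs */
	/* ... work under the lock; early returns need no unlock label ... */
	return 0;
}

Because a guard() taken this way holds the lock until the end of the enclosing function, the commit splits the setup paths into outer functions that allocate the affinity masks and take the lock, and inner __-prefixed helpers that run entirely under it.
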

diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b956ce591f964b41a7cac5e563dc7a2747026907..d89f491afdf05c760767410ab43fbe57c010fe94 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
        if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
                return;
 
-       msi_lock_descs(&dev->dev);
+       guard(msi_descs_lock)(&dev->dev);
        pci_msi_shutdown(dev);
        pci_free_msi_irqs(dev);
-       msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
        if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
                return;
 
-       msi_lock_descs(&dev->dev);
+       guard(msi_descs_lock)(&dev->dev);
        pci_msix_shutdown(dev);
        pci_free_msi_irqs(dev);
-       msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae3484276ecdf658537d9006a7f3d7..b243510999e49cc5fad5babe1e79aac33c2bba23 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -336,41 +336,11 @@ static int msi_verify_entries(struct pci_dev *dev)
        return !entry ? 0 : -EIO;
 }
 
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts.  A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ.  A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
-                              struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
 {
-       struct irq_affinity_desc *masks = NULL;
+       int ret = msi_setup_msi_desc(dev, nvec, masks);
        struct msi_desc *entry, desc;
-       int ret;
-
-       /* Reject multi-MSI early on irq domain enabled architectures */
-       if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
-               return 1;
 
-       /*
-        * Disable MSI during setup in the hardware, but mark it enabled
-        * so that setup code can evaluate it.
-        */
-       pci_msi_set_enable(dev, 0);
-       dev->msi_enabled = 1;
-
-       if (affd)
-               masks = irq_create_affinity_masks(nvec, affd);
-
-       msi_lock_descs(&dev->dev);
-       ret = msi_setup_msi_desc(dev, nvec, masks);
        if (ret)
                goto fail;
 
@@ -399,19 +369,48 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 
        pcibios_free_irq(dev);
        dev->irq = entry->irq;
-       goto unlock;
-
+       return 0;
 err:
        pci_msi_unmask(&desc, msi_multi_mask(&desc));
        pci_free_msi_irqs(dev);
 fail:
        dev->msi_enabled = 0;
-unlock:
-       msi_unlock_descs(&dev->dev);
-       kfree(masks);
        return ret;
 }
 
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts.  A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ.  A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+                              struct irq_affinity *affd)
+{
+       /* Reject multi-MSI early on irq domain enabled architectures */
+       if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+               return 1;
+
+       /*
+        * Disable MSI during setup in the hardware, but mark it enabled
+        * so that setup code can evaluate it.
+        */
+       pci_msi_set_enable(dev, 0);
+       dev->msi_enabled = 1;
+
+       struct irq_affinity_desc *masks __free(kfree) =
+               affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+       guard(msi_descs_lock)(&dev->dev);
+       return __msi_capability_init(dev, nvec, masks);
+}
+
 int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
                           struct irq_affinity *affd)
 {
@@ -666,40 +665,41 @@ static void msix_mask_all(void __iomem *base, int tsize)
                writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
-                                int nvec, struct irq_affinity *affd)
+static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+                                  int nvec, struct irq_affinity_desc *masks)
 {
-       struct irq_affinity_desc *masks = NULL;
-       int ret;
-
-       if (affd)
-               masks = irq_create_affinity_masks(nvec, affd);
+       int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
 
-       msi_lock_descs(&dev->dev);
-       ret = msix_setup_msi_descs(dev, entries, nvec, masks);
        if (ret)
-               goto out_free;
+               goto fail;
 
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
-               goto out_free;
+               goto fail;
 
        /* Check if all MSI entries honor device restrictions */
        ret = msi_verify_entries(dev);
        if (ret)
-               goto out_free;
+               goto fail;
 
        msix_update_entries(dev, entries);
-       goto out_unlock;
+       return 0;
 
-out_free:
+fail:
        pci_free_msi_irqs(dev);
-out_unlock:
-       msi_unlock_descs(&dev->dev);
-       kfree(masks);
        return ret;
 }
 
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+                                int nvec, struct irq_affinity *affd)
+{
+       struct irq_affinity_desc *masks __free(kfree) =
+               affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+       guard(msi_descs_lock)(&dev->dev);
+       return __msix_setup_interrupts(dev, entries, nvec, masks);
+}
+
 /**
  * msix_capability_init - configure device's MSI-X capability
  * @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -871,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
 
        write_msg = arch_restore_msi_irqs(dev);
 
-       msi_lock_descs(&dev->dev);
-       msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
-               if (write_msg)
-                       __pci_write_msi_msg(entry, &entry->msg);
-               pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+       scoped_guard (msi_descs_lock, &dev->dev) {
+               msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+                       if (write_msg)
+                               __pci_write_msi_msg(entry, &entry->msg);
+                       pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+               }
        }
-       msi_unlock_descs(&dev->dev);
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
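
In the __pci_restore_msix_state() hunk above, scoped_guard() is used instead of guard() so the descriptor lock covers only the restore loop and not the subsequent pci_msix_clear_and_set_ctrl() call, which ran unlocked before this change as well. A minimal hedged sketch of the difference, reusing the hypothetical example_lock guard class from the sketch above:

static void example_restore(struct example_dev *dev)
{
	scoped_guard(example_lock, dev) {
		/* lock held only within this block */
	}
	/* lock already dropped here; later work runs unlocked */
}
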