git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: xilinx: axienet: Use devres for resource management in probe path
author: Sean Anderson <sean.anderson@linux.dev>
Fri, 16 Jan 2026 19:27:24 +0000 (00:57 +0530)
committer: Jakub Kicinski <kuba@kernel.org>
Thu, 22 Jan 2026 02:57:07 +0000 (18:57 -0800)
Transition axienet_probe() to managed resource allocation using devm_*
APIs for network device and clock handling, while improving error paths
with dev_err_probe(). This eliminates the need for manual resource
cleanup during probe failures and streamlines the remove() function.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Co-developed-by: Suraj Gupta <suraj.gupta2@amd.com>
Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
Link: https://patch.msgid.link/20260116192725.972966-3-suraj.gupta2@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/xilinx/xilinx_axienet_main.c

index 284031fb2e2c7f989c53894cb6ab3a4d239f699c..998bacd508b8281d091bc90cf31b4250c0fe63b1 100644 (file)
@@ -2787,7 +2787,7 @@ static int axienet_probe(struct platform_device *pdev)
        int addr_width = 32;
        u32 value;
 
-       ndev = alloc_etherdev(sizeof(*lp));
+       ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
        if (!ndev)
                return -ENOMEM;
 
@@ -2815,41 +2815,32 @@ static int axienet_probe(struct platform_device *pdev)
        seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
        INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
 
-       lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+       lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
+                                                   "s_axi_lite_clk");
        if (!lp->axi_clk) {
                /* For backward compatibility, if named AXI clock is not present,
                 * treat the first clock specified as the AXI clock.
                 */
-               lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
-       }
-       if (IS_ERR(lp->axi_clk)) {
-               ret = PTR_ERR(lp->axi_clk);
-               goto free_netdev;
-       }
-       ret = clk_prepare_enable(lp->axi_clk);
-       if (ret) {
-               dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
-               goto free_netdev;
+               lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
        }
+       if (IS_ERR(lp->axi_clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
+                                    "could not get AXI clock\n");
 
        lp->misc_clks[0].id = "axis_clk";
        lp->misc_clks[1].id = "ref_clk";
        lp->misc_clks[2].id = "mgt_clk";
 
-       ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
-       if (ret)
-               goto cleanup_clk;
-
-       ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+       ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
+                                               lp->misc_clks);
        if (ret)
-               goto cleanup_clk;
+               return dev_err_probe(&pdev->dev, ret,
+                                    "could not get/enable misc. clocks\n");
 
        /* Map device registers */
        lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
-       if (IS_ERR(lp->regs)) {
-               ret = PTR_ERR(lp->regs);
-               goto cleanup_clk;
-       }
+       if (IS_ERR(lp->regs))
+               return PTR_ERR(lp->regs);
        lp->regs_start = ethres->start;
 
        /* Setup checksum offload, but default to off if not specified */
@@ -2918,19 +2909,17 @@ static int axienet_probe(struct platform_device *pdev)
                        lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
                        break;
                default:
-                       ret = -EINVAL;
-                       goto cleanup_clk;
+                       return -EINVAL;
                }
        } else {
                ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
                if (ret)
-                       goto cleanup_clk;
+                       return ret;
        }
        if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
                dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
-               ret = -EINVAL;
-               goto cleanup_clk;
+               return -EINVAL;
        }
 
        if (!of_property_present(pdev->dev.of_node, "dmas")) {
@@ -2945,7 +2934,7 @@ static int axienet_probe(struct platform_device *pdev)
                                dev_err(&pdev->dev,
                                        "unable to get DMA resource\n");
                                of_node_put(np);
-                               goto cleanup_clk;
+                               return ret;
                        }
                        lp->dma_regs = devm_ioremap_resource(&pdev->dev,
                                                             &dmares);
@@ -2962,19 +2951,17 @@ static int axienet_probe(struct platform_device *pdev)
                }
                if (IS_ERR(lp->dma_regs)) {
                        dev_err(&pdev->dev, "could not map DMA regs\n");
-                       ret = PTR_ERR(lp->dma_regs);
-                       goto cleanup_clk;
+                       return PTR_ERR(lp->dma_regs);
                }
                if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
                        dev_err(&pdev->dev, "could not determine irqs\n");
-                       ret = -ENOMEM;
-                       goto cleanup_clk;
+                       return -ENOMEM;
                }
 
                /* Reset core now that clocks are enabled, prior to accessing MDIO */
                ret = __axienet_device_reset(lp);
                if (ret)
-                       goto cleanup_clk;
+                       return ret;
 
                /* Autodetect the need for 64-bit DMA pointers.
                 * When the IP is configured for a bus width bigger than 32 bits,
@@ -3001,14 +2988,13 @@ static int axienet_probe(struct platform_device *pdev)
                }
                if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
                        dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
-                       ret = -EINVAL;
-                       goto cleanup_clk;
+                       return -EINVAL;
                }
 
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
                if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
-                       goto cleanup_clk;
+                       return ret;
                }
                netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
                netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
@@ -3018,15 +3004,12 @@ static int axienet_probe(struct platform_device *pdev)
 
                lp->eth_irq = platform_get_irq_optional(pdev, 0);
                if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
-                       ret = lp->eth_irq;
-                       goto cleanup_clk;
+                       return lp->eth_irq;
                }
                tx_chan = dma_request_chan(lp->dev, "tx_chan0");
-               if (IS_ERR(tx_chan)) {
-                       ret = PTR_ERR(tx_chan);
-                       dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
-                       goto cleanup_clk;
-               }
+               if (IS_ERR(tx_chan))
+                       return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
+                                            "No Ethernet DMA (TX) channel found\n");
 
                cfg.reset = 1;
                /* As name says VDMA but it has support for DMA channel reset */
@@ -3034,7 +3017,7 @@ static int axienet_probe(struct platform_device *pdev)
                if (ret < 0) {
                        dev_err(&pdev->dev, "Reset channel failed\n");
                        dma_release_channel(tx_chan);
-                       goto cleanup_clk;
+                       return ret;
                }
 
                dma_release_channel(tx_chan);
@@ -3139,13 +3122,6 @@ cleanup_mdio:
                put_device(&lp->pcs_phy->dev);
        if (lp->mii_bus)
                axienet_mdio_teardown(lp);
-cleanup_clk:
-       clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
-       clk_disable_unprepare(lp->axi_clk);
-
-free_netdev:
-       free_netdev(ndev);
-
        return ret;
 }
 
@@ -3163,11 +3139,6 @@ static void axienet_remove(struct platform_device *pdev)
                put_device(&lp->pcs_phy->dev);
 
        axienet_mdio_teardown(lp);
-
-       clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
-       clk_disable_unprepare(lp->axi_clk);
-
-       free_netdev(ndev);
 }
 
 static void axienet_shutdown(struct platform_device *pdev)