net/mlx5: Fix build -Wframe-larger-than warnings
author    Zhu Yanjun <yanjun.zhu@linux.dev>
          Tue, 22 Jul 2025 21:20:23 +0000 (14:20 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 25 Jul 2025 01:49:37 +0000 (18:49 -0700)
When building, the following warnings appear:
"
pci_irq.c: In function ‘mlx5_ctrl_irq_request’:
pci_irq.c:494:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]

pci_irq.c: In function ‘mlx5_irq_request_vector’:
pci_irq.c:561:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]

eq.c: In function ‘comp_irq_request_sf’:
eq.c:897:1: warning: the frame size of 1080 bytes is larger than 1024 bytes [-Wframe-larger-than=]

irq_affinity.c: In function ‘irq_pool_request_irq’:
irq_affinity.c:74:1: warning: the frame size of 1048 bytes is larger than 1024 bytes [-Wframe-larger-than=]
"

These warnings indicate that the stack frame size exceeds 1024 bytes in
these functions.
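
The culprit is struct irq_affinity_desc, which embeds a full cpumask sized
by CONFIG_NR_CPUS. For reference, the relevant definitions (from
include/linux/interrupt.h and include/linux/cpumask.h, lightly abridged):

  struct cpumask {
          DECLARE_BITMAP(bits, NR_CPUS);  /* NR_CPUS bits = NR_CPUS / 8 bytes */
  };

  struct irq_affinity_desc {
          struct cpumask  mask;           /* dominates the struct size */
          unsigned int    is_managed : 1;
  };

With CONFIG_NR_CPUS=8192, as in common distribution configs, the mask alone
is 8192 / 8 = 1024 bytes, so a single on-stack descriptor by itself already
exceeds the 1024-byte CONFIG_FRAME_WARN limit used in these builds (hence
the 1040-byte frames above).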

To resolve this, allocate the large struct irq_affinity_desc dynamically
with kvzalloc() instead of placing it on the stack. This moves the buffer
to the heap, reducing stack usage and eliminating the frame size warnings.
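
In pattern form, the change looks like the sketch below, where use_desc()
is a hypothetical stand-in for the real consumers (mlx5_irq_affinity_request()
and friends), not driver code:

  /* Before: the descriptor alone puts ~1 KiB on the stack. */
  static int request_with_stack_desc(void)
  {
          struct irq_affinity_desc af_desc = {};

          af_desc.is_managed = false;
          cpumask_copy(&af_desc.mask, cpu_online_mask);
          return use_desc(&af_desc);              /* hypothetical consumer */
  }

  /* After: the descriptor lives on the heap; every exit path frees it. */
  static int request_with_heap_desc(void)
  {
          struct irq_affinity_desc *af_desc;
          int err;

          af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
          if (!af_desc)
                  return -ENOMEM;

          af_desc->is_managed = false;
          cpumask_copy(&af_desc->mask, cpu_online_mask);
          err = use_desc(af_desc);                /* hypothetical consumer */

          kvfree(af_desc);
          return err;
  }

For an allocation of this size kvzalloc() is normally satisfied by kmalloc();
the vmalloc() fallback only kicks in for much larger requests, so the cost
is one small heap allocation per call.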

Acked-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250722212023.244296-1-yanjun.zhu@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 66dce17219a6c7f5506ce927eacabeab26431e95..1ab77159409d67b576c5a493fa0ec47704a88ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -876,19 +876,25 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
 {
        struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
        struct mlx5_eq_table *table = dev->priv.eq_table;
-       struct irq_affinity_desc af_desc = {};
+       struct irq_affinity_desc *af_desc;
        struct mlx5_irq *irq;
 
-       /* In case SF irq pool does not exist, fallback to the PF irqs*/
+       /* In case SF irq pool does not exist, fallback to the PF irqs */
        if (!mlx5_irq_pool_is_sf_pool(pool))
                return comp_irq_request_pci(dev, vecidx);
 
-       af_desc.is_managed = false;
-       cpumask_copy(&af_desc.mask, cpu_online_mask);
-       cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
-       irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
-       if (IS_ERR(irq))
+       af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+       if (!af_desc)
+               return -ENOMEM;
+
+       af_desc->is_managed = false;
+       cpumask_copy(&af_desc->mask, cpu_online_mask);
+       cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+       irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+       if (IS_ERR(irq)) {
+               kvfree(af_desc);
                return PTR_ERR(irq);
+       }
 
        cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
        mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -896,6 +902,8 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
                      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
                      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
 
+       kvfree(af_desc);
+
        return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 2691d88cdee1f7f61da738a3a0bac9cf1ab7f296..82d3c25682443b4b9ffc99f07183b64a179772d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -47,29 +47,40 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
 static struct mlx5_irq *
 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
-       struct irq_affinity_desc auto_desc = {};
+       struct irq_affinity_desc *auto_desc;
        struct mlx5_irq *irq;
        u32 irq_index;
        int err;
 
+       auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
+       if (!auto_desc)
+               return ERR_PTR(-ENOMEM);
+
        err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
-       if (err)
+       if (err) {
+               kvfree(auto_desc);
                return ERR_PTR(err);
+       }
+
        if (pool->irqs_per_cpu) {
                if (cpumask_weight(&af_desc->mask) > 1)
                        /* if req_mask contain more then one CPU, set the least loadad CPU
                         * of req_mask
                         */
                        cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
-                                       &auto_desc.mask);
+                                       &auto_desc->mask);
                else
                        cpu_get(pool, cpumask_first(&af_desc->mask));
        }
+
        irq = mlx5_irq_alloc(pool, irq_index,
-                            cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+                            cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
                             NULL);
        if (IS_ERR(irq))
                xa_erase(&pool->irqs, irq_index);
+
+       kvfree(auto_desc);
+
        return irq;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 40024cfa30998452648003ebc8044512915dbf35..692ef9c2f72933fd3a56ab25da27e43e8f5482bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -470,26 +470,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 {
        struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
-       struct irq_affinity_desc af_desc;
+       struct irq_affinity_desc *af_desc;
        struct mlx5_irq *irq;
 
-       cpumask_copy(&af_desc.mask, cpu_online_mask);
-       af_desc.is_managed = false;
+       af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+       if (!af_desc)
+               return ERR_PTR(-ENOMEM);
+
+       cpumask_copy(&af_desc->mask, cpu_online_mask);
+       af_desc->is_managed = false;
        if (!mlx5_irq_pool_is_sf_pool(pool)) {
                /* In case we are allocating a control IRQ from a pci device's pool.
                 * This can happen also for a SF if the SFs pool is empty.
                 */
                if (!pool->xa_num_irqs.max) {
-                       cpumask_clear(&af_desc.mask);
+                       cpumask_clear(&af_desc->mask);
                        /* In case we only have a single IRQ for PF/VF */
-                       cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+                       cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
                }
                /* Allocate the IRQ in index 0. The vector was already allocated */
-               irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+               irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
        } else {
-               irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
+               irq = mlx5_irq_affinity_request(dev, pool, af_desc);
        }
 
+       kvfree(af_desc);
+
        return irq;
 }
 
@@ -548,16 +554,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
 {
        struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = table->pcif_pool;
-       struct irq_affinity_desc af_desc;
        int offset = MLX5_IRQ_VEC_COMP_BASE;
+       struct irq_affinity_desc *af_desc;
+       struct mlx5_irq *irq;
+
+       af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+       if (!af_desc)
+               return ERR_PTR(-ENOMEM);
 
        if (!pool->xa_num_irqs.max)
                offset = 0;
 
-       af_desc.is_managed = false;
-       cpumask_clear(&af_desc.mask);
-       cpumask_set_cpu(cpu, &af_desc.mask);
-       return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+       af_desc->is_managed = false;
+       cpumask_clear(&af_desc->mask);
+       cpumask_set_cpu(cpu, &af_desc->mask);
+
+       irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+
+       kvfree(af_desc);
+
+       return irq;
 }
 
 static struct mlx5_irq_pool *