{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- int err;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i, err;
if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
if (err)
dev_err(dev, "dpseci_reset() failed\n");
}
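+ /* free the per-CPU netdevs used for NAPI, allocated in dpaa2_dpseci_setup() */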
+ for_each_cpu(i, priv->clean_mask) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ free_netdev(ppriv->net_dev);
+ }
+ free_cpumask_var(priv->clean_mask);
+
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
- cpumask_var_t clean_mask;
int err, cpu;
u8 i;
err = -ENOMEM;
- if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
- goto err_cpumask;
-
priv = dev_get_drvdata(dev);
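+ /* clean_mask now lives in priv so dpaa2_dpseci_free() can release the netdevs */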
+ if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
+ goto err_cpumask;
+
priv->dev = dev;
priv->dpsec_id = ls_dev->obj_desc.id;
err = -ENOMEM;
goto err_alloc_netdev;
}
- cpumask_set_cpu(cpu, clean_mask);
+ cpumask_set_cpu(cpu, priv->clean_mask);
ppriv->net_dev->dev = *dev;
netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
dpaa2_dpseci_poll,
DPAA2_CAAM_NAPI_WEIGHT);
}
- err = 0;
- goto free_cpumask;
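+ /* keep priv->clean_mask allocated; dpaa2_dpseci_free() releases it with the netdevs */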
+ return 0;
err_alloc_netdev:
- free_dpaa2_pcpu_netdev(priv, clean_mask);
+ free_dpaa2_pcpu_netdev(priv, priv->clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
-free_cpumask:
- free_cpumask_var(clean_mask);
+ free_cpumask_var(priv->clean_mask);
err_cpumask:
return err;
}
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
- free_netdev(ppriv->net_dev);
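+ /* ppriv->net_dev is freed later, in dpaa2_dpseci_free() */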
}
return 0;