	rdev->nqr = NULL;
}
+/* When DEL_GID fails, the driver does not free the GID context memory.
+ * To avoid leaking it, free the memory during unload.
+ */
+static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+	int i;
+
+	if (!sgid_tbl->active)
+		return;
+
+	ctx_tbl = sgid_tbl->ctx;
+	for (i = 0; i < sgid_tbl->max; i++) {
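+		/* Skip unused slots; hw_id stays at 0xFFFF until a GID is programmed */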
+		if (sgid_tbl->hw_id[i] == 0xFFFF)
+			continue;
+
+		ctx = ctx_tbl[i];
+		kfree(ctx);
+	}
+}
+
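+/* Allocate the host memory for the stats context, then create the
+ * corresponding context in the FW; unwind the host allocation if the
+ * FW step fails.
+ */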
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+	if (rc)
+		goto free_stat_mem;
+
+	return 0;
+free_stat_mem:
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+	return rc;
+}
+
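+/* The stats3 context is needed only when RoCE mirroring is supported */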
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return 0;
+
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+	if (rc)
+		goto free_stat_mem;
+
+	return 0;
+free_stat_mem:
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+	return rc;
+}
+
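+/* Release in reverse order of allocation: FW context first, then host memory */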
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+	if (!rdev->rcfw.roce_mirror)
+		return;
+
+	bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+	bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
	u8 type;
	bnxt_re_net_unregister_async_event(rdev);
	bnxt_re_uninit_dcb_wq(rdev);
-	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
-		cancel_delayed_work_sync(&rdev->worker);
+	bnxt_re_put_stats3_ctx(rdev);
+	bnxt_re_free_gid_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);