RDMA/bnxt_re: Add support for flow create/destroy
author Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Fri, 22 Aug 2025 04:07:58 +0000 (09:37 +0530)
committer Leon Romanovsky <leon@kernel.org>
Thu, 11 Sep 2025 06:18:18 +0000 (02:18 -0400)
- Added support for the create_flow and destroy_flow verbs. These
  verbs are used on a RawEth QP to add a specific flow action.
- To support tcpdump on RoCE, added the IB_FLOW_ATTR_SNIFFER
  attribute.
- In the create_flow verb, the driver allocates a mirror vnic and
  configures it with the RawEth QP. Once this is done, the driver
  enables mirroring.
- In destroy_flow, the driver disables mirroring and frees the
  mirror vnic.

Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Reviewed-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Link: https://patch.msgid.link/20250822040801.776196-8-kalesh-anakkur.purayil@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/qplib_sp.h
drivers/infiniband/hw/bnxt_re/roce_hsi.h
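
For context, here is a minimal userspace sketch (not part of this patch) of how an application would exercise these verbs through rdma-core's libibverbs: it requests sniffer mirroring on an existing raw Ethernet QP and later tears it down. The QP setup and most error handling are elided, and the helper names are illustrative; only ibv_create_flow()/ibv_destroy_flow() and IBV_FLOW_ATTR_SNIFFER come from the standard verbs API.

/*
 * Illustrative sketch only: enable/disable RoCE mirroring from userspace.
 * Assumes "raweth_qp" is an IBV_QPT_RAW_PACKET QP that has already been
 * created and transitioned to RTS on the bnxt_re device.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static struct ibv_flow *enable_roce_mirroring(struct ibv_qp *raweth_qp)
{
	struct ibv_flow_attr attr = {
		.type         = IBV_FLOW_ATTR_SNIFFER,  /* mirror all traffic */
		.size         = sizeof(attr),
		.num_of_specs = 0,                      /* no match specs needed */
		.port         = 1,
	};
	struct ibv_flow *flow;

	/* Reaches bnxt_re_create_flow(): the driver allocates and configures
	 * the mirror vnic, then enables mirroring in firmware. */
	flow = ibv_create_flow(raweth_qp, &attr);
	if (!flow)
		perror("ibv_create_flow");
	return flow;
}

static void disable_roce_mirroring(struct ibv_flow *flow)
{
	int rc;

	/* Reaches bnxt_re_destroy_flow(): mirroring is disabled and the
	 * mirror vnic is freed. */
	rc = ibv_destroy_flow(flow);
	if (rc)
		fprintf(stderr, "ibv_destroy_flow failed: %d\n", rc);
}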

diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 4bf10d7db77bebb0b58eb380712c28581a393802..ae4476c87be75f01cce7b7409a66b4d7fa0e11f4 100644
@@ -232,6 +232,7 @@ struct bnxt_re_dev {
        u16 mirror_vnic_id;
        union ib_gid ugid;
        u32 ugid_index;
+       u8 sniffer_flow_created : 1;
 };
 
 #define to_bnxt_re_dev(ptr, member)    \
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c83809c72f5b97b5c97c959a63f67bd86e5b55ac..90c23d0ee26296f9b76b56686e573079d231fa97 100644
@@ -4449,6 +4449,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
        }
 }
 
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+       int rc;
+
+       rc = bnxt_re_hwrm_alloc_vnic(rdev);
+       if (rc)
+               return rc;
+
+       rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+       if (rc)
+               goto out_free_vnic;
+
+       return 0;
+out_free_vnic:
+       bnxt_re_hwrm_free_vnic(rdev);
+       return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+                                   struct ib_flow_attr *attr,
+                                   struct ib_udata *udata)
+{
+       struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+       struct bnxt_re_dev *rdev = qp->rdev;
+       struct bnxt_re_flow *flow;
+       int rc;
+
+       if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+           !rdev->rcfw.roce_mirror)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       mutex_lock(&rdev->qp_lock);
+       if (rdev->sniffer_flow_created) {
+               ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+               mutex_unlock(&rdev->qp_lock);
+               return ERR_PTR(-EBUSY);
+       }
+
+       flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+       if (!flow) {
+               mutex_unlock(&rdev->qp_lock);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       flow->rdev = rdev;
+
+       rc = bnxt_re_setup_vnic(rdev, qp);
+       if (rc)
+               goto out_free_flow;
+
+       rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+       if (rc)
+               goto out_free_vnic;
+
+       rdev->sniffer_flow_created = 1;
+       mutex_unlock(&rdev->qp_lock);
+
+       return &flow->ib_flow;
+
+out_free_vnic:
+       bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+       mutex_unlock(&rdev->qp_lock);
+       kfree(flow);
+       return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+       struct bnxt_re_flow *flow =
+               container_of(flow_id, struct bnxt_re_flow, ib_flow);
+       struct bnxt_re_dev *rdev = flow->rdev;
+       int rc;
+
+       mutex_lock(&rdev->qp_lock);
+       rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+       if (rc)
+               ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+       rdev->sniffer_flow_created = 0;
+
+       bnxt_re_hwrm_free_vnic(rdev);
+       mutex_unlock(&rdev->qp_lock);
+       kfree(flow);
+
+       return rc;
+}
+
 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
 {
        struct bnxt_re_cq *cq = NULL, *tmp_cq;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 445a28b3cd968ffa52d695c0c043b45f2bd7d4a6..76ba9ab04d5ce44f826793252bacd6645a02df94 100644
@@ -272,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
                                         struct uverbs_attr_bundle *attrs);
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+                                   struct ib_flow_attr *attr,
+                                   struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index a284f762738aa1c8623398c84b1288d7714bda0d..8179501a0135b64c709d9d944431150aa6074a4c 100644
@@ -1365,6 +1365,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
        .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
        .req_notify_cq = bnxt_re_req_notify_cq,
        .resize_cq = bnxt_re_resize_cq,
+       .create_flow = bnxt_re_create_flow,
+       .destroy_flow = bnxt_re_destroy_flow,
        INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
        INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
        INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 3d68e44a2dcec5bc53482c9a579b5ed29c589bed..698fcad9f0342b4be6251e71fcb3a5c5187db2fb 100644
@@ -1146,3 +1146,40 @@ out:
        dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
        return rc;
 }
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+       struct creq_roce_mirror_cfg_resp resp = {};
+       struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+       struct cmdq_roce_mirror_cfg req = {};
+       struct bnxt_qplib_cmdqmsg msg = {};
+
+       bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+                                CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+                                sizeof(req));
+
+       req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+       bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+                               sizeof(resp), 0);
+       return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+       struct creq_roce_mirror_cfg_resp resp = {};
+       struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+       struct cmdq_roce_mirror_cfg req = {};
+       struct bnxt_qplib_cmdqmsg msg = {};
+
+       bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+                                CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+                                sizeof(req));
+
+       req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+       bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+                               sizeof(resp), 0);
+
+       return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 58f90f3e57f7ea8f1351ec3cba197d3f2152e0db..147b5d9c03138b31bcca06936ccd19969bb6a687 100644
@@ -360,6 +360,8 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
 int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
                              struct bnxt_qplib_cc_param *cc_param);
 void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
 
 #define BNXT_VAR_MAX_WQE       4352
 #define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index f9ac37335a1d5b5638b2ac642756fe8738163bd7..cfdf69a3fe9a8aa6a95caa7c01df0c183d6ebf1d 100644
@@ -144,7 +144,8 @@ struct cmdq_base {
        #define CMDQ_BASE_OPCODE_MODIFY_CQ              0x90UL
        #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND        0x91UL
        #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT   0x92UL
-       #define CMDQ_BASE_OPCODE_LAST                  CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+       #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG        0x99UL
+       #define CMDQ_BASE_OPCODE_LAST                   CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
        u8      cmd_size;
        __le16  flags;
        __le16  cookie;
@@ -2109,6 +2110,43 @@ struct creq_query_roce_stats_ext_resp_sb {
        __le64  dup_req;
 };
 
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+       u8      opcode;
+       #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG     0x99UL
+       #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST                \
+                               CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+       u8      cmd_size;
+       __le16  flags;
+       __le16  cookie;
+       u8      resp_size;
+       u8      reserved8;
+       __le64  resp_addr;
+       u8      mirror_flags;
+       #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE              0x1UL
+       u8      rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+       u8      type;
+       #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK     0x3fUL
+       #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT      0
+       #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+       #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST     \
+                       CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+       u8      status;
+       __le16  cookie;
+       __le32  reserved32;
+       u8      v;
+       #define CREQ_ROCE_MIRROR_CFG_RESP_V             0x1UL
+       u8      event;
+       #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+       #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST    \
+                       CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+       u8      reserved48[6];
+};
+
 /* cmdq_query_func (size:128b/16B) */
 struct cmdq_query_func {
        u8      opcode;