int bnxt_re_query_device(struct ib_device *ibdev, struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+	struct bnxt_re_query_device_ex_resp resp = {};
+	size_t outlen = (udata) ? udata->outlen : 0;
+	int rc = 0;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
-	return 0;
+
+	if ((offsetofend(typeof(resp), packet_pacing_caps) <= outlen) &&
+	    _is_modify_qp_rate_limit_supported(dev_attr->dev_cap_flags2)) {
+		resp.packet_pacing_caps.qp_rate_limit_min =
+			dev_attr->rate_limit_min;
+		resp.packet_pacing_caps.qp_rate_limit_max =
+			dev_attr->rate_limit_max;
+		resp.packet_pacing_caps.supported_qpts =
+			1 << IB_QPT_RC;
+	}
+	if (outlen)
+		rc = ib_copy_to_udata(udata, &resp,
+				      min(sizeof(resp), outlen));
+
+	return rc;
}
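
The guard above follows the usual extended-uverbs pattern: packet_pacing_caps
is reported only when the user-supplied response buffer is large enough to
receive the whole field, and the final ib_copy_to_udata() is trimmed to
min(sizeof(resp), outlen), so both older (smaller) and newer (larger)
userspace response layouts keep working. For reference, offsetofend() is the
stock kernel helper from include/linux/stddef.h:

	/* Number of bytes from the start of TYPE up to and including MEMBER,
	 * i.e. how many response bytes userspace must accept for MEMBER to
	 * be copied back in full. */
	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
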
int bnxt_re_modify_device(struct ib_device *ibdev, int device_modify_mask,
			  struct ib_device_modify *device_modify)
	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
+	if (_is_modify_qp_rate_limit_supported(dev_attr->dev_cap_flags2))
+		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_QP_RATE_LIMIT_ENABLED;
+
	if (udata->inlen >= sizeof(ureq)) {
		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
		if (rc)
	BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
	BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
	BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED = 0x40,
+	BNXT_RE_UCNTX_CMASK_QP_RATE_LIMIT_ENABLED = 0x80ULL,
};
enum bnxt_re_get_toggle_mem_methods {
	BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
	BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
};
+
+struct bnxt_re_packet_pacing_caps {
+	__u32 qp_rate_limit_min;
+	__u32 qp_rate_limit_max; /* In kbps */
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RC
+	 */
+	__u32 supported_qpts;
+	__u32 reserved;
+};
+
+struct bnxt_re_query_device_ex_resp {
+	struct bnxt_re_packet_pacing_caps packet_pacing_caps;
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
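
For illustration only, a hypothetical userspace consumer of this ABI could
look like the sketch below. print_pacing_caps() and uctx_comp_mask are
invented names for the example (the real provider lives in rdma-core's
libbnxt_re, which is not shown here); the hard-coded 2 is IB_QPT_RC's value
in enum ib_qp_type, matching the supported_qpts encoding documented above.

	#include <stdint.h>
	#include <stdio.h>
	#include <rdma/bnxt_re-abi.h> /* structs/enums added by this patch */

	/* Hypothetical helper: uctx_comp_mask stands in for the comp_mask
	 * returned by the driver at alloc_ucontext time, resp for the
	 * extended query-device reply defined above. */
	static void print_pacing_caps(uint64_t uctx_comp_mask,
				      const struct bnxt_re_query_device_ex_resp *resp)
	{
		if (!(uctx_comp_mask & BNXT_RE_UCNTX_CMASK_QP_RATE_LIMIT_ENABLED))
			return; /* firmware lacks modify_qp rate-limit support */

		/* IB_QPT_RC == 2, i.e. the bit set by 1 << IB_QPT_RC */
		if (resp->packet_pacing_caps.supported_qpts & (1U << 2))
			printf("RC QP rate limit: %u-%u kbps\n",
			       resp->packet_pacing_caps.qp_rate_limit_min,
			       resp->packet_pacing_caps.qp_rate_limit_max);
	}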