	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
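+		/* Honour a node-type restriction, if the caller set one */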
+		if (id_priv->restricted_node_type != RDMA_NODE_UNSPECIFIED &&
+		    id_priv->restricted_node_type != cma_dev->device->node_type)
+			continue;
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
		return ERR_PTR(-ENOMEM);

	id_priv->state = RDMA_CM_IDLE;
+	id_priv->restricted_node_type = RDMA_NODE_UNSPECIFIED;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
}
EXPORT_SYMBOL(rdma_resolve_addr);
+int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret = 0;
+
+	switch (node_type) {
+	case RDMA_NODE_UNSPECIFIED:
+	case RDMA_NODE_IB_CA:
+	case RDMA_NODE_RNIC:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mutex_lock(&lock);
+	if (id_priv->cma_dev)
+		ret = -EALREADY;
+	else
+		id_priv->restricted_node_type = node_type;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(rdma_restrict_node_type);
+
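For context, a minimal sketch of the intended calling sequence on the active side. The helper name, event handler, and timeout below are hypothetical; only the rdma_cm calls and the new rdma_restrict_node_type() come from the API:

#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Hypothetical ULP helper: resolve an address over iWARP devices only. */
static int example_resolve_iwarp_only(struct sockaddr *src, struct sockaddr *dst,
				      rdma_cm_event_handler handler, void *ctx)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* Must precede rdma_resolve_addr(), which acquires a device. */
	ret = rdma_restrict_node_type(id, RDMA_NODE_RNIC);
	if (!ret)
		ret = rdma_resolve_addr(id, src, dst, 2000 /* ms */);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}

Note that rdma_resolve_addr() completes asynchronously through the event handler; the point here is only that the restriction is applied before any device is acquired.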
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv =
*/
void rdma_destroy_id(struct rdma_cm_id *id);
+/**
+ * rdma_restrict_node_type - Restrict an RDMA identifier to a specific
+ *   RDMA device node type.
+ *
+ * @id: RDMA identifier.
+ * @node_type: The device node type. Only RDMA_NODE_UNSPECIFIED (the default),
+ *   RDMA_NODE_RNIC and RDMA_NODE_IB_CA are allowed.
+ *
+ * This allows the caller to restrict the devices that may be used
+ * to iWARP (RDMA_NODE_RNIC) or InfiniBand/RoCEv1/RoCEv2 (RDMA_NODE_IB_CA).
+ *
+ * It must be called before the RDMA identifier is bound to a device,
+ * which means it should be called before rdma_bind_addr(),
+ * rdma_resolve_addr() and rdma_listen().
+ */
+int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type);
+
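As a passive-side complement to the sketch above, and to illustrate the ordering rule documented here (restrict first, then bind and listen), a hedged example; the helper name and backlog value are made up:

#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Hypothetical listener restricted to InfiniBand/RoCE (RDMA_NODE_IB_CA). */
static int example_listen_ib_only(struct sockaddr *addr,
				  rdma_cm_event_handler handler, void *ctx)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* Restrict before rdma_bind_addr() can attach a device. */
	ret = rdma_restrict_node_type(id, RDMA_NODE_IB_CA);
	if (!ret)
		ret = rdma_bind_addr(id, addr);
	if (!ret)
		ret = rdma_listen(id, 16 /* backlog */);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}

Once the identifier has acquired a device (for example after rdma_bind_addr() to a non-wildcard address), a later rdma_restrict_node_type() call fails with -EALREADY, per the implementation above.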
/**
* rdma_bind_addr - Bind an RDMA identifier to a source address and
* associated RDMA device, if needed.