// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER	5
#define SMC_QP_TIMEOUT		15	/* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT	7	/* 7: infinite */
#define SMC_QP_RNR_RETRY	7	/* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */
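/* The helpers below move a link's RC queue pair through the IB state
 * machine (RESET -> INIT -> RTR -> RTS) using the attributes kept in
 * struct smc_link.
 */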
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
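/* transition the QP to Ready-To-Receive: program the peer GID, MAC,
 * QP number and starting receive packet sequence number of the link
 */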
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
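/* transition the QP to Ready-To-Send: set ack timeout, retry counts
 * and the initial send packet sequence number
 */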
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
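/* put the QP back into the RESET state */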
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
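/* bring the QP of a link into an operational state: INIT and RTR on both
 * sides, additionally RTS on the server side; receive buffers are posted
 * and completion notification is armed on the way
 */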
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr =
		container_of(lnk, struct smc_link_group, lnk[0]);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
	}
}
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_DEVICE_FATAL:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
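/* release the protection domain of a link */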
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}
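/* allocate a protection domain on the IB device used by a link */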
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	default:
		break;
	}
}
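/* destroy the queue pair of a link */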
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
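/* release a memory region obtained via smc_ib_get_memory_region() */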
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);
	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}
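/* determine the GID and MAC address of an IB port; the MAC is taken from
 * the associated net_device if available, otherwise derived from GID 0
 */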
static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct net_device *ndev;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
			  &smcibdev->gid[ibport - 1], NULL);
	/* the SMC protocol requires specification of the roce MAC address;
	 * if net_device cannot be determined, it can be derived from gid 0
	 */
	ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
	if (ndev) {
		memcpy(smcibdev->mac[ibport - 1], ndev->dev_addr, ETH_ALEN);
		dev_put(ndev);
	} else if (!rc) {
		memcpy(&smcibdev->mac[ibport - 1][0],
		       &smcibdev->gid[ibport - 1].raw[8], 3);
		memcpy(&smcibdev->mac[ibport - 1][3],
		       &smcibdev->gid[ibport - 1].raw[13], 3);
		smcibdev->mac[ibport - 1][0] &= ~0x02;
	}
	return rc;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
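/* query and cache the port attributes, GID and MAC address of an IB port;
 * the first active port also seeds the local system identifier
 */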
int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
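/* per-device setup: create send and receive completion queues, register
 * the global event handler and initialize the work request layer
 */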
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
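/* undo smc_ib_setup_per_ibdev() for an initialized IB device */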
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smc_wr_remove_dev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}
static struct ib_client smc_ib_client;
/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
}
/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	if (!smcibdev || smcibdev->ibdev != ibdev)
		return;
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};
int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}
void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}