// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include "gdma.h"
#include "hw_channel.h"

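/* Reserve a free slot in the inflight-message bitmap. The semaphore is
 * initialized to the number of slots, so callers block here when every
 * message ID is already in use.
 */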
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

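/* Match a response back to the caller that issued the request (via the
 * hwc_msg_id embedded in the response header), copy the payload into the
 * caller's output buffer, and wake the waiter.
 */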
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

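/* Post (or repost) one receive buffer as a single-SGE WQE on the HWC RQ,
 * so the hardware always has a buffer available for the next response.
 */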
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);

	return err;
}

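/* EQ callback used while the channel is being established: the SoC posts
 * a series of init EQEs carrying queue IDs, limits, the PD and the memory
 * key, and finally GDMA_EQE_HWC_INIT_DONE.
 */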
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

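/* RX completion path: walk back from the completed WQE's SGE address to
 * the matching hwc_work_request, hand the response to the caller, then
 * repost the buffer.
 */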
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Stop using 'resp', because the buffer is reposted to the HW
	 * by mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

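/* CQ callback shared by the HWC SQ and RQ: poll all pending CQEs and
 * demultiplex each one to the TX or RX event handler based on is_sq,
 * then re-arm the CQ.
 */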
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	if (!hwc_cq)
		return;

	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

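/* Create the single HWC completion queue plus the event queue it hangs
 * off; both are sized for q_depth entries and rounded up to at least one
 * page.
 */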
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

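/* Allocate one DMA region for a queue and carve it into q_depth
 * fixed-size message slots, each described by a hwc_work_request.
 */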
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(sizeof(*dma_buf) +
			  q_depth * sizeof(struct hwc_work_request),
			  GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	if (!hwc_wq)
		return;

	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

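/* Build a TX WQE for the HWC SQ: a single SGE covering the request
 * message plus an inline hwc_tx_oob that routes the message to the
 * destination virtual RQ/RCQ.
 */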
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);

	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);

	return err;
}

static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

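/* Hand the queue addresses to the SoC over the shared-memory channel and
 * wait for the GDMA_EQE_HWC_INIT_DONE event that reports the negotiated
 * queue depth and message-size limits.
 */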
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct hwc_wq *hwc_rxq = NULL;
	struct hwc_wq *hwc_txq = NULL;
	struct hwc_cq *hwc_cq = NULL;
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc_cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}
	hwc->cq = hwc_cq;

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc_cq, &hwc_rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}
	hwc->rxq = hwc_rxq;

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc_cq, &hwc_txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}
	hwc->txq = hwc_txq;

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	if (hwc_txq)
		mana_hwc_destroy_wq(hwc, hwc_txq);

	if (hwc_rxq)
		mana_hwc_destroy_wq(hwc, hwc_rxq);

	if (hwc_cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);
	return err;
}

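/* Entry point called at probe time: allocate the hw_channel_context,
 * create the bootstrap queues, establish the channel with the SoC, and
 * run a basic EQ test before the channel is used for management traffic.
 */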
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	kfree(hwc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct hwc_caller_ctx *ctx;

	mana_smc_teardown_hwc(&gc->shm_channel, false);

	ctx = hwc->caller_ctx;
	kfree(ctx);
	hwc->caller_ctx = NULL;

	mana_hwc_destroy_wq(hwc, hwc->txq);
	hwc->txq = NULL;

	mana_hwc_destroy_wq(hwc, hwc->rxq);
	hwc->rxq = NULL;

	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
	hwc->cq = NULL;

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	if (hwc->gdma_dev->pdid != INVALID_PDID) {
		hwc->gdma_dev->doorbell = INVALID_DOORBELL;
		hwc->gdma_dev->pdid = INVALID_PDID;
	}

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;
}

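/* Send one management request over the HW channel and block (up to 30s)
 * for the response, which mana_hwc_handle_resp() copies into 'resp'.
 *
 * An illustrative caller sketch; the message type and structs below are
 * hypothetical placeholders, and mana_gd_init_req_hdr() is assumed from
 * the companion GDMA code, not defined in this file:
 *
 *	struct my_query_req req = {};
 *	struct my_query_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MY_QUERY_MSG, sizeof(req),
 *			     sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req, sizeof(resp),
 *				    &resp);
 */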
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;

	memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}