/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include "i40iw.h"
/**
 * i40iw_initialize_hw_resources - initialize hw resource tracking during open
 * @iwdev: iwarp device
 */
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
{
	unsigned long num_pds;
	u32 resources_size;
	u32 max_mr;
	u32 max_qp;
	u32 max_cq;
	u32 arp_table_size;
	u32 mrdrvbits;
	void *resource_ptr;

	max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
	max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
	iwdev->max_cqe = 0xFFFFF;
	num_pds = I40IW_MAX_PDS;
	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
	resources_size += sizeof(struct i40iw_qp **) * max_qp;
	iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);

	if (!iwdev->mem_resources)
		return -ENOMEM;

	iwdev->max_qp = max_qp;
	iwdev->max_mr = max_mr;
	iwdev->max_cq = max_cq;
	iwdev->max_pd = num_pds;
	iwdev->arp_table_size = arp_table_size;
	iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
	resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);

	iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
	    IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;

	iwdev->allocated_qps = resource_ptr;
	iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
	iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
	iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
	iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
	iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
	set_bit(0, iwdev->allocated_mrs);
	set_bit(0, iwdev->allocated_qps);
	set_bit(0, iwdev->allocated_cqs);
	set_bit(0, iwdev->allocated_pds);
	set_bit(0, iwdev->allocated_arps);

	/* Following for ILQ/IEQ */
	set_bit(1, iwdev->allocated_qps);
	set_bit(1, iwdev->allocated_cqs);
	set_bit(1, iwdev->allocated_pds);
	set_bit(2, iwdev->allocated_cqs);
	set_bit(2, iwdev->allocated_pds);

	spin_lock_init(&iwdev->resource_lock);
	spin_lock_init(&iwdev->qptable_lock);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
	iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
	return 0;
}
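/*
 * All of the per-resource bitmaps above live in the single mem_resources
 * allocation, laid out back to back:
 *
 *	arp_table | qp bitmap | cq bitmap | mr bitmap | pd bitmap |
 *	arp bitmap | qp_table pointer array
 *
 * Bit 0 of every bitmap is pre-set so index 0 is never handed out;
 * QP 1, CQs 1-2 and PDs 1-2 are likewise reserved for the ILQ/IEQ.
 *
 * Worked stag-mask example (hypothetical sizing, not a value the
 * hardware mandates): with max_mr = 0x10000, get_count_order() returns
 * 16, so mrdrvbits = 24 - 16 = 8 and
 * mr_stagmask = ~(0xff << 24) = 0x00ffffff, i.e. the driver owns the
 * top 8 bits of each stag index.
 */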
/**
 * i40iw_cqp_ce_handler - handle cqp completions
 * @iwdev: iwarp device
 * @cq: cq for cqp completions
 * @arm: flag to arm after completions
 */
static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
{
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u32 cqe_count = 0;
	struct i40iw_ccq_cqe_info info;
	int ret;

	do {
		memset(&info, 0, sizeof(info));
		ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
		if (ret)
			break;
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
		if (info.error)
			i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				     info.op_code, info.maj_err_code, info.min_err_code);
		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;

			if (cqp_request->waiting) {
				cqp_request->request_done = true;
				wake_up(&cqp_request->waitq);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request, 1);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			}
		}

		cqe_count++;
	} while (1);

	if (arm && cqe_count) {
		i40iw_process_bh(dev);
		dev->ccq_ops->ccq_arm(cq);
	}
}
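/*
 * How the two completion paths above pair with submission elsewhere in
 * this file (a summary, not new behavior): a waiting request sleeps in
 * i40iw_handle_cqp_op() on cqp_request->waitq until request_done is set
 * here, while a non-waiting request instead supplies callback_fcn (for
 * example, the qhash path sets i40iw_send_syn_cqp_callback() below).
 * Either way, this handler drops the completion-side reference with
 * i40iw_put_cqp_request().
 */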
/**
 * i40iw_iwarp_ce_handler - handle iwarp completions
 * @iwdev: iwarp device
 * @iwcq: iwarp cq receiving event
 */
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
				   struct i40iw_sc_cq *iwcq)
{
	struct i40iw_cq *i40iwcq = iwcq->back_cq;

	if (i40iwcq->ibcq.comp_handler)
		i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
					   i40iwcq->ibcq.cq_context);
}
/**
 * i40iw_puda_ce_handler - handle puda completion events
 * @iwdev: iwarp device
 * @cq: puda completion q for event
 */
static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
				  struct i40iw_sc_cq *cq)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
	enum i40iw_status_code status;
	u32 compl_error;

	do {
		status = i40iw_puda_poll_completion(dev, cq, &compl_error);
		if (status == I40IW_ERR_QUEUE_EMPTY)
			break;
		if (status) {
			i40iw_pr_err("puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
			break;
		}
	} while (1);

	dev->ccq_ops->ccq_arm(cq);
}
/**
 * i40iw_process_ceq - handle ceq for completions
 * @iwdev: iwarp device
 * @ceq: ceq having cq for completion
 */
void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_ceq *sc_ceq;
	struct i40iw_sc_cq *cq;
	bool arm = true;

	sc_ceq = &ceq->sc_ceq;
	do {
		cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
		if (!cq)
			break;

		if (cq->cq_type == I40IW_CQ_TYPE_CQP)
			i40iw_cqp_ce_handler(iwdev, cq, arm);
		else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
			i40iw_iwarp_ce_handler(iwdev, cq);
		else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
			 (cq->cq_type == I40IW_CQ_TYPE_IEQ))
			i40iw_puda_ce_handler(iwdev, cq);
	} while (1);
}
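/*
 * Dispatch above is strictly by CQ type: only the CQP path re-arms its
 * CQ from here (via i40iw_cqp_ce_handler() with arm == true), iWARP CQs
 * are re-armed by the verbs consumer, and ILQ/IEQ CQs are re-armed in
 * i40iw_puda_ce_handler() once drained.
 */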
/**
 * i40iw_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: del hash
 * @term: term message
 * @termlen: length of term message
 */
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
			 u8 state,
			 u8 del_hash,
			 u8 term,
			 u8 termlen)
{
	struct i40iw_modify_qp_info info;

	memset(&info, 0, sizeof(info));
	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.dont_send_term = true;
	info.dont_send_fin = true;
	info.termlen = termlen;

	if (term & I40IWQP_TERM_SEND_TERM_ONLY)
		info.dont_send_term = false;
	if (term & I40IWQP_TERM_SEND_FIN_ONLY)
		info.dont_send_fin = false;
	if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
		info.reset_tcp_conn = true;
	iwqp->hw_iwarp_state = state;
	i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
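/*
 * Usage sketch (illustrative only; the flag combinations mirror the AE
 * handlers below): tearing a connection down with a TERM message would
 * look like
 *
 *	i40iw_next_iw_state(iwqp, I40IW_QP_STATE_TERMINATE, 0,
 *			    I40IWQP_TERM_SEND_TERM_ONLY, termlen);
 *
 * while the graceful-close path in i40iw_process_aeq() passes
 * I40IW_QP_STATE_CLOSING with del_hash, term and termlen all zero.
 */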
/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
		if (info->qp) {
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}
			i40iw_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
				    (iwqp->ibqp_state == IB_QPS_RTS)) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_BAD_CLOSE:
			/* fall through */
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
			break;
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
			ctx_info->err_rq_idx_valid = false;
			/* fall through */
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
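/*
 * Lifetime note: the qp_table lookup and i40iw_add_ref() above run under
 * qptable_lock, so an iwqp cannot be freed while its AE is being handled;
 * the matching i40iw_rem_ref() is dropped once per processed entry, and
 * all consumed AEQEs are reposted to the hardware in a single batch.
 */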
/**
 * i40iw_cqp_manage_abvpt_cmd - send cqp command manage apbvt
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
static enum i40iw_status_code
i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
			   u16 accel_local_port,
			   bool add_port)
{
	struct i40iw_apbvt_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;

	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = cpu_to_le16(accel_local_port);

	cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage APBVT entry fail");

	return status;
}
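/*
 * The function above is the canonical shape of a CQP command in this
 * driver: get a request (waiting only if the caller needs the result
 * before returning), fill in cqp_info->in.u.<op>, set cqp_cmd and
 * post_sq, then submit. A minimal sketch, with <op> standing for any
 * of the union members used in this file:
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_<op>;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.<op>.cqp = &iwdev->cqp.sc_cqp;
 *	cqp_info->in.u.<op>.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 *
 * The scratch value is how i40iw_cqp_ce_handler() recovers the request
 * from the completion's scratch field.
 */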
/**
 * i40iw_manage_apbvt - add or delete tcp port
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
					  u16 accel_local_port,
					  bool add_port)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	enum i40iw_status_code status;
	unsigned long flags;
	bool in_use;

	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
	 * protect against race where add APBVT CQP can race ahead of the delete
	 * APBVT for same port.
	 */
	if (add_port) {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = __test_and_set_bit(accel_local_port,
					    cm_core->ports_in_use);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		if (in_use)
			return 0;
		return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						  true);
	} else {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = i40iw_port_in_use(cm_core, accel_local_port);
		if (in_use) {
			spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
			return 0;
		}
		__clear_bit(accel_local_port, cm_core->ports_in_use);
		status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						    false);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return status;
	}
}
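/*
 * Usage sketch (illustrative): the CM adds the accelerated port when a
 * listener or connection first needs it and deletes it with the last
 * user, e.g. i40iw_manage_apbvt(iwdev, loc_port, true) on listen and
 * i40iw_manage_apbvt(iwdev, loc_port, false) on teardown. The
 * ports_in_use bitmap ensures only the first add and the last delete
 * actually reach the hardware.
 */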
/**
 * i40iw_manage_arp_cache - manage hw arp cache
 * @iwdev: iwarp device
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4 address
 * @action: add, delete or modify
 */
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
			    unsigned char *mac_addr,
			    u32 *ip_addr,
			    bool ipv4,
			    u32 action)
{
	struct i40iw_add_arp_cache_entry_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	int arp_index;

	arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
	if (arp_index == -1)
		return;
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == I40IW_ARP_ADD) {
		cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = cpu_to_le16((u16)arp_index);
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->post_sq = 1;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
}
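/*
 * Usage sketch (hypothetical variable names): after neighbour
 * resolution, an IPv4 peer would be mirrored into the hardware cache
 * with something like
 *
 *	u32 ip[4] = { peer_ipaddr, 0, 0, 0 };
 *
 *	i40iw_manage_arp_cache(iwdev, neigh->ha, ip, true, I40IW_ARP_ADD);
 *
 * i40iw_arp_table() picks (or releases) the slot in the software table
 * and this function pushes the same change to the hardware.
 */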
/**
 * i40iw_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 * @send_ack: flag send ack
 */
static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
{
	i40iw_send_syn(cqp_request->param, send_ack);
}
/**
 * i40iw_manage_qhash - add or modify qhash
 * @iwdev: iwarp device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 * @user_pri: user pri of the connection
 */
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait)
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id != 0xFFFF) {
		info->vlan_valid = true;
		info->vlan_id = cpu_to_le16(cminfo->vlan_id);
	} else {
		info->vlan_valid = false;
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
	info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
	info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
	if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		info->src_port = cpu_to_le16(cminfo->rem_port);
		info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
		info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
		info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
		info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
	}
	if (cmnode) {
		cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
		cqp_request->param = (void *)cmnode;
	}

	if (info->ipv4_valid)
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	else
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
	return status;
}
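/*
 * Usage sketch (illustrative): the CM installs a SYN qhash entry when a
 * listen starts, e.g.
 *
 *	i40iw_manage_qhash(iwdev, cminfo, I40IW_QHASH_TYPE_TCP_SYN,
 *			   I40IW_QHASH_MANAGE_TYPE_ADD, NULL, false);
 *
 * and a TCP_ESTABLISHED entry once the quad is known. Passing a cm_node
 * arms i40iw_send_syn_cqp_callback(), so the SYN/ACK is sent only after
 * the hash entry is in place.
 */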
/**
 * i40iw_hw_flush_wqes - flush qp's wqe
 * @iwdev: iwarp device
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
					   struct i40iw_sc_qp *qp,
					   struct i40iw_qp_flush_info *info,
					   bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_qp_flush_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Flush WQE's fail");
		complete(&iwqp->sq_drained);
		complete(&iwqp->rq_drained);
		return status;
	}
	if (!cqp_request->compl_info.maj_err_code) {
		switch (cqp_request->compl_info.min_err_code) {
		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
			complete(&iwqp->sq_drained);
			break;
		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
			complete(&iwqp->rq_drained);
			break;
		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
			break;
		default:
			complete(&iwqp->sq_drained);
			complete(&iwqp->rq_drained);
			break;
		}
	}

	return 0;
}
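/*
 * Note: sq_drained/rq_drained are the completions the verbs drain
 * helpers wait on. The minor codes read inversely: if the CQP reports
 * that only RQ WQEs were flushed, the SQ was already empty and its
 * waiter is released here; a queue that really had WQEs flushed is
 * signalled later, when its flush CQEs are polled.
 */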
/**
 * i40iw_gen_ae - generate AE
 * @iwdev: iwarp device
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void i40iw_gen_ae(struct i40iw_device *iwdev,
		  struct i40iw_sc_qp *qp,
		  struct i40iw_gen_ae_info *info,
		  bool wait)
{
	struct i40iw_gen_ae_info *ae_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	ae_info = &cqp_request->info.in.u.gen_ae.info;
	memcpy(ae_info, info, sizeof(*ae_info));

	cqp_info->cqp_cmd = OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n",
			     info->ae_code);
}
/**
 * i40iw_hw_manage_vf_pble_bp - manage vf pbles
 * @iwdev: iwarp device
 * @info: info for managing pble
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
						  struct i40iw_manage_vf_pble_info *info,
						  bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_manage_vf_pble_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	if ((iwdev->init_state < CCQ_CREATED) && wait)
		wait = false;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
	return status;
}
/**
 * i40iw_get_ib_wc - convert an iwarp flush code to an ib_wc_status
 * @opcode: iwarp flush code
 */
static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
{
	switch (opcode) {
	case FLUSH_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case FLUSH_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;
	case FLUSH_LOC_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case FLUSH_GENERAL_ERR:
		return IB_WC_GENERAL_ERR;
	case FLUSH_FATAL_ERR:
	default:
		return IB_WC_FATAL_ERR;
	}
}
/**
 * i40iw_set_flush_info - set flush info
 * @pinfo: qp flush info to fill
 * @min: minor err
 * @maj: major err
 * @opcode: flush error code
 */
static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
				 u16 *min,
				 u16 *maj,
				 enum i40iw_flush_opcode opcode)
{
	*min = (u16)i40iw_get_ib_wc(opcode);
	*maj = CQE_MAJOR_DRV;
	pinfo->userflushcode = true;
}
/**
 * i40iw_flush_wqes - flush wqe for qp
 * @iwdev: iwarp device
 * @iwqp: qp to flush wqes
 */
void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_qp_flush_info *pinfo = &info;

	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	memset(pinfo, 0, sizeof(*pinfo));
	info.sq = true;
	info.rq = true;
	if (qp->term_flags) {
		i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
				     &pinfo->sq_major_code, qp->flush_code);
		i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
				     &pinfo->rq_major_code, qp->flush_code);
	}

	(void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
}
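/*
 * End-to-end flush sketch: when a terminated QP is flushed,
 * i40iw_flush_wqes() stamps driver flush codes via i40iw_set_flush_info(),
 * the CQP flushes both queues, and the poll path later reports each
 * flushed WQE with the ib_wc_status chosen by i40iw_get_ib_wc(), with
 * CQE_MAJOR_DRV marking the code as driver-generated.
 */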