1 /*******************************************************************************
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 *******************************************************************************/
35 #include "i40iw_osdep.h"
36 #include "i40iw_register.h"
37 #include "i40iw_status.h"
38 #include "i40iw_hmc.h"
41 #include "i40iw_type.h"
44 #include "i40iw_virtchnl.h"
47 * i40iw_insert_wqe_hdr - write wqe header
48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe
51 void i40iw_insert_wqe_hdr(u64
*wqe
, u64 header
)
53 wmb(); /* make sure WQE is populated before polarity is set */
54 set_64bit_val(wqe
, 24, header
);
57 void i40iw_check_cqp_progress(struct i40iw_cqp_timeout
*cqp_timeout
, struct i40iw_sc_dev
*dev
)
59 if (cqp_timeout
->compl_cqp_cmds
!= dev
->cqp_cmd_stats
[OP_COMPLETED_COMMANDS
]) {
60 cqp_timeout
->compl_cqp_cmds
= dev
->cqp_cmd_stats
[OP_COMPLETED_COMMANDS
];
61 cqp_timeout
->count
= 0;
63 if (dev
->cqp_cmd_stats
[OP_REQUESTED_COMMANDS
] != cqp_timeout
->compl_cqp_cmds
)
69 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70 * @cqp: struct for cqp hw
71 * @val: cqp tail register value
72 * @tail:wqtail register value
73 * @error: cqp processing err
75 static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp
*cqp
,
80 if (cqp
->dev
->is_pf
) {
81 *val
= i40iw_rd32(cqp
->dev
->hw
, I40E_PFPE_CQPTAIL
);
82 *tail
= RS_32(*val
, I40E_PFPE_CQPTAIL_WQTAIL
);
83 *error
= RS_32(*val
, I40E_PFPE_CQPTAIL_CQP_OP_ERR
);
85 *val
= i40iw_rd32(cqp
->dev
->hw
, I40E_VFPE_CQPTAIL1
);
86 *tail
= RS_32(*val
, I40E_VFPE_CQPTAIL_WQTAIL
);
87 *error
= RS_32(*val
, I40E_VFPE_CQPTAIL_CQP_OP_ERR
);
92 * i40iw_cqp_poll_registers - poll cqp registers
93 * @cqp: struct for cqp hw
94 * @tail:wqtail register value
95 * @count: how many times to try for completion
97 static enum i40iw_status_code
i40iw_cqp_poll_registers(
98 struct i40iw_sc_cqp
*cqp
,
103 u32 newtail
, error
, val
;
107 i40iw_get_cqp_reg_info(cqp
, &val
, &newtail
, &error
);
109 error
= (cqp
->dev
->is_pf
) ?
110 i40iw_rd32(cqp
->dev
->hw
, I40E_PFPE_CQPERRCODES
) :
111 i40iw_rd32(cqp
->dev
->hw
, I40E_VFPE_CQPERRCODES1
);
112 return I40IW_ERR_CQP_COMPL_ERROR
;
114 if (newtail
!= tail
) {
116 I40IW_RING_MOVE_TAIL(cqp
->sq_ring
);
117 cqp
->dev
->cqp_cmd_stats
[OP_COMPLETED_COMMANDS
]++;
120 udelay(I40IW_SLEEP_COUNT
);
122 return I40IW_ERR_TIMEOUT
;
126 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
127 * @buf: ptr to fpm commit buffer
128 * @info: ptr to i40iw_hmc_obj_info struct
129 * @sd: number of SDs for HMC objects
131 * parses fpm commit info and copy base value
132 * of hmc objects in hmc_info
134 static enum i40iw_status_code
i40iw_sc_parse_fpm_commit_buf(
136 struct i40iw_hmc_obj_info
*info
,
145 /* copy base values in obj_info */
146 for (i
= I40IW_HMC_IW_QP
, j
= 0; i
<= I40IW_HMC_IW_PBLE
; i
++, j
+= 8) {
147 if ((i
== I40IW_HMC_IW_SRQ
) ||
148 (i
== I40IW_HMC_IW_FSIMC
) ||
149 (i
== I40IW_HMC_IW_FSIAV
)) {
154 get_64bit_val(buf
, j
, &temp
);
155 info
[i
].base
= RS_64_1(temp
, 32) * 512;
156 if (info
[i
].base
> base
) {
160 if (i
== I40IW_HMC_IW_APBVT_ENTRY
) {
164 if (i
== I40IW_HMC_IW_QP
)
165 info
[i
].cnt
= (u32
)RS_64(temp
, I40IW_QUERY_FPM_MAX_QPS
);
166 else if (i
== I40IW_HMC_IW_CQ
)
167 info
[i
].cnt
= (u32
)RS_64(temp
, I40IW_QUERY_FPM_MAX_CQS
);
169 info
[i
].cnt
= (u32
)(temp
);
171 size
= info
[k
].cnt
* info
[k
].size
+ info
[k
].base
;
173 *sd
= (u32
)((size
>> 21) + 1); /* add 1 for remainder */
175 *sd
= (u32
)(size
>> 21);
181 * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
182 * @buf: ptr to fpm query buffer
183 * @buf_idx: index into buf
184 * @info: ptr to i40iw_hmc_obj_info struct
185 * @rsrc_idx: resource index into info
187 * Decode a 64 bit value from fpm query buffer into max count and size
189 static u64
i40iw_sc_decode_fpm_query(u64
*buf
,
191 struct i40iw_hmc_obj_info
*obj_info
,
197 get_64bit_val(buf
, buf_idx
, &temp
);
198 obj_info
[rsrc_idx
].max_cnt
= (u32
)temp
;
199 size
= (u32
)RS_64_1(temp
, 32);
200 obj_info
[rsrc_idx
].size
= LS_64_1(1, size
);
206 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
207 * @buf: ptr to fpm query buffer
208 * @info: ptr to i40iw_hmc_obj_info struct
209 * @hmc_fpm_misc: ptr to fpm data
211 * parses fpm query buffer and copy max_cnt and
212 * size value of hmc objects in hmc_info
214 static enum i40iw_status_code
i40iw_sc_parse_fpm_query_buf(
216 struct i40iw_hmc_info
*hmc_info
,
217 struct i40iw_hmc_fpm_misc
*hmc_fpm_misc
)
219 struct i40iw_hmc_obj_info
*obj_info
;
224 obj_info
= hmc_info
->hmc_obj
;
226 get_64bit_val(buf
, 0, &temp
);
227 hmc_info
->first_sd_index
= (u16
)RS_64(temp
, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX
);
228 max_pe_sds
= (u16
)RS_64(temp
, I40IW_QUERY_FPM_MAX_PE_SDS
);
230 /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
231 if (hmc_info
->hmc_fn_id
>= I40IW_FIRST_VF_FPM_ID
)
233 hmc_fpm_misc
->max_sds
= max_pe_sds
;
234 hmc_info
->sd_table
.sd_cnt
= max_pe_sds
+ hmc_info
->first_sd_index
;
236 get_64bit_val(buf
, 8, &temp
);
237 obj_info
[I40IW_HMC_IW_QP
].max_cnt
= (u32
)RS_64(temp
, I40IW_QUERY_FPM_MAX_QPS
);
238 size
= (u32
)RS_64_1(temp
, 32);
239 obj_info
[I40IW_HMC_IW_QP
].size
= LS_64_1(1, size
);
241 get_64bit_val(buf
, 16, &temp
);
242 obj_info
[I40IW_HMC_IW_CQ
].max_cnt
= (u32
)RS_64(temp
, I40IW_QUERY_FPM_MAX_CQS
);
243 size
= (u32
)RS_64_1(temp
, 32);
244 obj_info
[I40IW_HMC_IW_CQ
].size
= LS_64_1(1, size
);
246 i40iw_sc_decode_fpm_query(buf
, 32, obj_info
, I40IW_HMC_IW_HTE
);
247 i40iw_sc_decode_fpm_query(buf
, 40, obj_info
, I40IW_HMC_IW_ARP
);
249 obj_info
[I40IW_HMC_IW_APBVT_ENTRY
].size
= 8192;
250 obj_info
[I40IW_HMC_IW_APBVT_ENTRY
].max_cnt
= 1;
252 i40iw_sc_decode_fpm_query(buf
, 48, obj_info
, I40IW_HMC_IW_MR
);
253 i40iw_sc_decode_fpm_query(buf
, 56, obj_info
, I40IW_HMC_IW_XF
);
255 get_64bit_val(buf
, 64, &temp
);
256 obj_info
[I40IW_HMC_IW_XFFL
].max_cnt
= (u32
)temp
;
257 obj_info
[I40IW_HMC_IW_XFFL
].size
= 4;
258 hmc_fpm_misc
->xf_block_size
= RS_64(temp
, I40IW_QUERY_FPM_XFBLOCKSIZE
);
259 if (!hmc_fpm_misc
->xf_block_size
)
260 return I40IW_ERR_INVALID_SIZE
;
262 i40iw_sc_decode_fpm_query(buf
, 72, obj_info
, I40IW_HMC_IW_Q1
);
264 get_64bit_val(buf
, 80, &temp
);
265 obj_info
[I40IW_HMC_IW_Q1FL
].max_cnt
= (u32
)temp
;
266 obj_info
[I40IW_HMC_IW_Q1FL
].size
= 4;
267 hmc_fpm_misc
->q1_block_size
= RS_64(temp
, I40IW_QUERY_FPM_Q1BLOCKSIZE
);
268 if (!hmc_fpm_misc
->q1_block_size
)
269 return I40IW_ERR_INVALID_SIZE
;
271 i40iw_sc_decode_fpm_query(buf
, 88, obj_info
, I40IW_HMC_IW_TIMER
);
273 get_64bit_val(buf
, 112, &temp
);
274 obj_info
[I40IW_HMC_IW_PBLE
].max_cnt
= (u32
)temp
;
275 obj_info
[I40IW_HMC_IW_PBLE
].size
= 8;
277 get_64bit_val(buf
, 120, &temp
);
278 hmc_fpm_misc
->max_ceqs
= (u8
)RS_64(temp
, I40IW_QUERY_FPM_MAX_CEQS
);
279 hmc_fpm_misc
->ht_multiplier
= RS_64(temp
, I40IW_QUERY_FPM_HTMULTIPLIER
);
280 hmc_fpm_misc
->timer_bucket
= RS_64(temp
, I40IW_QUERY_FPM_TIMERBUCKET
);
286 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
287 * @qs_list: list of qs_handles to be fixed with valid qs_handles
289 static void i40iw_fill_qos_list(u16
*qs_list
)
291 u16 qshandle
= qs_list
[0];
294 for (i
= 0; i
< I40IW_MAX_USER_PRIORITY
; i
++) {
295 if (qs_list
[i
] == QS_HANDLE_UNKNOWN
)
296 qs_list
[i
] = qshandle
;
298 qshandle
= qs_list
[i
];
303 * i40iw_qp_from_entry - Given entry, get to the qp structure
304 * @entry: Points to list of qp structure
306 static struct i40iw_sc_qp
*i40iw_qp_from_entry(struct list_head
*entry
)
311 return (struct i40iw_sc_qp
*)((char *)entry
- offsetof(struct i40iw_sc_qp
, list
));
315 * i40iw_get_qp - get the next qp from the list given current qp
316 * @head: Listhead of qp's
319 static struct i40iw_sc_qp
*i40iw_get_qp(struct list_head
*head
, struct i40iw_sc_qp
*qp
)
321 struct list_head
*entry
= NULL
;
322 struct list_head
*lastentry
;
324 if (list_empty(head
))
330 lastentry
= &qp
->list
;
331 entry
= (lastentry
!= head
) ? lastentry
->next
: NULL
;
334 return i40iw_qp_from_entry(entry
);
338 * i40iw_change_l2params - given the new l2 parameters, change all qp
339 * @vsi: pointer to the vsi structure
340 * @l2params: New parameters from l2
342 void i40iw_change_l2params(struct i40iw_sc_vsi
*vsi
, struct i40iw_l2params
*l2params
)
344 struct i40iw_sc_dev
*dev
= vsi
->dev
;
345 struct i40iw_sc_qp
*qp
= NULL
;
346 bool qs_handle_change
= false;
351 if (vsi
->mtu
!= l2params
->mtu
) {
352 vsi
->mtu
= l2params
->mtu
;
353 i40iw_reinitialize_ieq(dev
);
356 i40iw_fill_qos_list(l2params
->qs_handle_list
);
357 for (i
= 0; i
< I40IW_MAX_USER_PRIORITY
; i
++) {
358 qs_handle
= l2params
->qs_handle_list
[i
];
359 if (vsi
->qos
[i
].qs_handle
!= qs_handle
)
360 qs_handle_change
= true;
361 spin_lock_irqsave(&vsi
->qos
[i
].lock
, flags
);
362 qp
= i40iw_get_qp(&vsi
->qos
[i
].qplist
, qp
);
364 if (qs_handle_change
) {
365 qp
->qs_handle
= qs_handle
;
366 /* issue cqp suspend command */
367 i40iw_qp_suspend_resume(dev
, qp
, true);
369 qp
= i40iw_get_qp(&vsi
->qos
[i
].qplist
, qp
);
371 spin_unlock_irqrestore(&vsi
->qos
[i
].lock
, flags
);
372 vsi
->qos
[i
].qs_handle
= qs_handle
;
377 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
378 * @qp: qp to be removed from qos
380 void i40iw_qp_rem_qos(struct i40iw_sc_qp
*qp
)
382 struct i40iw_sc_vsi
*vsi
= qp
->vsi
;
387 spin_lock_irqsave(&vsi
->qos
[qp
->user_pri
].lock
, flags
);
389 spin_unlock_irqrestore(&vsi
->qos
[qp
->user_pri
].lock
, flags
);
393 * i40iw_qp_add_qos - called during setctx for qp to be added to qos
394 * @qp: qp to be added to qos
396 void i40iw_qp_add_qos(struct i40iw_sc_qp
*qp
)
398 struct i40iw_sc_vsi
*vsi
= qp
->vsi
;
403 spin_lock_irqsave(&vsi
->qos
[qp
->user_pri
].lock
, flags
);
404 qp
->qs_handle
= vsi
->qos
[qp
->user_pri
].qs_handle
;
405 list_add(&qp
->list
, &vsi
->qos
[qp
->user_pri
].qplist
);
406 qp
->on_qoslist
= true;
407 spin_unlock_irqrestore(&vsi
->qos
[qp
->user_pri
].lock
, flags
);
411 * i40iw_sc_pd_init - initialize sc pd struct
412 * @dev: sc device struct
414 * @pd_id: pd_id for allocated pd
415 * @abi_ver: ABI version from user context, -1 if not valid
417 static void i40iw_sc_pd_init(struct i40iw_sc_dev
*dev
,
418 struct i40iw_sc_pd
*pd
,
422 pd
->size
= sizeof(*pd
);
424 pd
->abi_ver
= abi_ver
;
429 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
430 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
431 * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
433 u8
i40iw_get_encoded_wqe_size(u32 wqsize
, bool cqpsq
)
437 /* cqp sq's hw coded value starts from 1 for size of 4
438 * while it starts from 0 for qp' wq's.
449 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
450 * @cqp: IWARP control queue pair pointer
451 * @info: IWARP control queue pair init info pointer
453 * Initializes the object and context buffers for a control Queue Pair.
455 static enum i40iw_status_code
i40iw_sc_cqp_init(struct i40iw_sc_cqp
*cqp
,
456 struct i40iw_cqp_init_info
*info
)
460 if ((info
->sq_size
> I40IW_CQP_SW_SQSIZE_2048
) ||
461 (info
->sq_size
< I40IW_CQP_SW_SQSIZE_4
) ||
462 ((info
->sq_size
& (info
->sq_size
- 1))))
463 return I40IW_ERR_INVALID_SIZE
;
465 hw_sq_size
= i40iw_get_encoded_wqe_size(info
->sq_size
, true);
466 cqp
->size
= sizeof(*cqp
);
467 cqp
->sq_size
= info
->sq_size
;
468 cqp
->hw_sq_size
= hw_sq_size
;
469 cqp
->sq_base
= info
->sq
;
470 cqp
->host_ctx
= info
->host_ctx
;
471 cqp
->sq_pa
= info
->sq_pa
;
472 cqp
->host_ctx_pa
= info
->host_ctx_pa
;
473 cqp
->dev
= info
->dev
;
474 cqp
->struct_ver
= info
->struct_ver
;
475 cqp
->scratch_array
= info
->scratch_array
;
477 cqp
->en_datacenter_tcp
= info
->en_datacenter_tcp
;
478 cqp
->enabled_vf_count
= info
->enabled_vf_count
;
479 cqp
->hmc_profile
= info
->hmc_profile
;
480 info
->dev
->cqp
= cqp
;
482 I40IW_RING_INIT(cqp
->sq_ring
, cqp
->sq_size
);
483 cqp
->dev
->cqp_cmd_stats
[OP_REQUESTED_COMMANDS
] = 0;
484 cqp
->dev
->cqp_cmd_stats
[OP_COMPLETED_COMMANDS
] = 0;
485 INIT_LIST_HEAD(&cqp
->dev
->cqp_cmd_head
); /* for the cqp commands backlog. */
487 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CQPTAIL
, 0);
488 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CQPDB
, 0);
490 i40iw_debug(cqp
->dev
, I40IW_DEBUG_WQE
,
491 "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
492 __func__
, cqp
->sq_size
, cqp
->hw_sq_size
,
493 cqp
->sq_base
, cqp
->sq_pa
, cqp
, cqp
->polarity
);
498 * i40iw_sc_cqp_create - create cqp during bringup
499 * @cqp: struct for cqp hw
500 * @maj_err: If error, major err number
501 * @min_err: If error, minor err number
503 static enum i40iw_status_code
i40iw_sc_cqp_create(struct i40iw_sc_cqp
*cqp
,
508 u32 cnt
= 0, p1
, p2
, val
= 0, err_code
;
509 enum i40iw_status_code ret_code
;
514 ret_code
= i40iw_allocate_dma_mem(cqp
->dev
->hw
,
516 I40IW_UPDATE_SD_BUF_SIZE
* cqp
->sq_size
,
517 I40IW_SD_BUF_ALIGNMENT
);
522 temp
= LS_64(cqp
->hw_sq_size
, I40IW_CQPHC_SQSIZE
) |
523 LS_64(cqp
->struct_ver
, I40IW_CQPHC_SVER
);
525 set_64bit_val(cqp
->host_ctx
, 0, temp
);
526 set_64bit_val(cqp
->host_ctx
, 8, cqp
->sq_pa
);
527 temp
= LS_64(cqp
->enabled_vf_count
, I40IW_CQPHC_ENABLED_VFS
) |
528 LS_64(cqp
->hmc_profile
, I40IW_CQPHC_HMC_PROFILE
);
529 set_64bit_val(cqp
->host_ctx
, 16, temp
);
530 set_64bit_val(cqp
->host_ctx
, 24, (uintptr_t)cqp
);
531 set_64bit_val(cqp
->host_ctx
, 32, 0);
532 set_64bit_val(cqp
->host_ctx
, 40, 0);
533 set_64bit_val(cqp
->host_ctx
, 48, 0);
534 set_64bit_val(cqp
->host_ctx
, 56, 0);
536 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CQP_HOST_CTX",
537 cqp
->host_ctx
, I40IW_CQP_CTX_SIZE
* 8);
539 p1
= RS_32_1(cqp
->host_ctx_pa
, 32);
540 p2
= (u32
)cqp
->host_ctx_pa
;
542 if (cqp
->dev
->is_pf
) {
543 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CCQPHIGH
, p1
);
544 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CCQPLOW
, p2
);
546 i40iw_wr32(cqp
->dev
->hw
, I40E_VFPE_CCQPHIGH1
, p1
);
547 i40iw_wr32(cqp
->dev
->hw
, I40E_VFPE_CCQPLOW1
, p2
);
550 if (cnt
++ > I40IW_DONE_COUNT
) {
551 i40iw_free_dma_mem(cqp
->dev
->hw
, &cqp
->sdbuf
);
552 ret_code
= I40IW_ERR_TIMEOUT
;
554 * read PFPE_CQPERRORCODES register to get the minor
555 * and major error code
558 err_code
= i40iw_rd32(cqp
->dev
->hw
, I40E_PFPE_CQPERRCODES
);
560 err_code
= i40iw_rd32(cqp
->dev
->hw
, I40E_VFPE_CQPERRCODES1
);
561 *min_err
= RS_32(err_code
, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE
);
562 *maj_err
= RS_32(err_code
, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE
);
565 udelay(I40IW_SLEEP_COUNT
);
567 val
= i40iw_rd32(cqp
->dev
->hw
, I40E_PFPE_CCQPSTATUS
);
569 val
= i40iw_rd32(cqp
->dev
->hw
, I40E_VFPE_CCQPSTATUS1
);
574 cqp
->process_cqp_sds
= i40iw_update_sds_noccq
;
579 * i40iw_sc_cqp_post_sq - post of cqp's sq
580 * @cqp: struct for cqp hw
582 void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp
*cqp
)
585 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CQPDB
, I40IW_RING_GETCURRENT_HEAD(cqp
->sq_ring
));
587 i40iw_wr32(cqp
->dev
->hw
, I40E_VFPE_CQPDB1
, I40IW_RING_GETCURRENT_HEAD(cqp
->sq_ring
));
589 i40iw_debug(cqp
->dev
,
591 "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
599 * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
600 * @cqp: pointer to CQP structure
601 * @scratch: private data for CQP WQE
602 * @wqe_idx: WQE index for next WQE on CQP SQ
604 static u64
*i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp
*cqp
,
605 u64 scratch
, u32
*wqe_idx
)
608 enum i40iw_status_code ret_code
;
610 if (I40IW_RING_FULL_ERR(cqp
->sq_ring
)) {
611 i40iw_debug(cqp
->dev
,
613 "%s: ring is full head %x tail %x size %x\n",
620 I40IW_ATOMIC_RING_MOVE_HEAD(cqp
->sq_ring
, *wqe_idx
, ret_code
);
621 cqp
->dev
->cqp_cmd_stats
[OP_REQUESTED_COMMANDS
]++;
625 cqp
->polarity
= !cqp
->polarity
;
627 wqe
= cqp
->sq_base
[*wqe_idx
].elem
;
628 cqp
->scratch_array
[*wqe_idx
] = scratch
;
629 I40IW_CQP_INIT_WQE(wqe
);
635 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
636 * @cqp: struct for cqp hw
637 * @scratch: private data for CQP WQE
639 u64
*i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp
*cqp
, u64 scratch
)
643 return i40iw_sc_cqp_get_next_send_wqe_idx(cqp
, scratch
, &wqe_idx
);
647 * i40iw_sc_cqp_destroy - destroy cqp during close
648 * @cqp: struct for cqp hw
650 static enum i40iw_status_code
i40iw_sc_cqp_destroy(struct i40iw_sc_cqp
*cqp
)
652 u32 cnt
= 0, val
= 1;
653 enum i40iw_status_code ret_code
= 0;
656 if (cqp
->dev
->is_pf
) {
657 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CCQPHIGH
, 0);
658 i40iw_wr32(cqp
->dev
->hw
, I40E_PFPE_CCQPLOW
, 0);
659 cqpstat_addr
= I40E_PFPE_CCQPSTATUS
;
661 i40iw_wr32(cqp
->dev
->hw
, I40E_VFPE_CCQPHIGH1
, 0);
662 i40iw_wr32(cqp
->dev
->hw
, I40E_VFPE_CCQPLOW1
, 0);
663 cqpstat_addr
= I40E_VFPE_CCQPSTATUS1
;
666 if (cnt
++ > I40IW_DONE_COUNT
) {
667 ret_code
= I40IW_ERR_TIMEOUT
;
670 udelay(I40IW_SLEEP_COUNT
);
671 val
= i40iw_rd32(cqp
->dev
->hw
, cqpstat_addr
);
674 i40iw_free_dma_mem(cqp
->dev
->hw
, &cqp
->sdbuf
);
679 * i40iw_sc_ccq_arm - enable intr for control cq
680 * @ccq: ccq sc struct
682 static void i40iw_sc_ccq_arm(struct i40iw_sc_cq
*ccq
)
689 /* write to cq doorbell shadow area */
690 /* arm next se should always be zero */
691 get_64bit_val(ccq
->cq_uk
.shadow_area
, 32, &temp_val
);
693 sw_cq_sel
= (u16
)RS_64(temp_val
, I40IW_CQ_DBSA_SW_CQ_SELECT
);
694 arm_next_se
= (u8
)RS_64(temp_val
, I40IW_CQ_DBSA_ARM_NEXT_SE
);
696 arm_seq_num
= (u8
)RS_64(temp_val
, I40IW_CQ_DBSA_ARM_SEQ_NUM
);
699 temp_val
= LS_64(arm_seq_num
, I40IW_CQ_DBSA_ARM_SEQ_NUM
) |
700 LS_64(sw_cq_sel
, I40IW_CQ_DBSA_SW_CQ_SELECT
) |
701 LS_64(arm_next_se
, I40IW_CQ_DBSA_ARM_NEXT_SE
) |
702 LS_64(1, I40IW_CQ_DBSA_ARM_NEXT
);
704 set_64bit_val(ccq
->cq_uk
.shadow_area
, 32, temp_val
);
706 wmb(); /* make sure shadow area is updated before arming */
709 i40iw_wr32(ccq
->dev
->hw
, I40E_PFPE_CQARM
, ccq
->cq_uk
.cq_id
);
711 i40iw_wr32(ccq
->dev
->hw
, I40E_VFPE_CQARM1
, ccq
->cq_uk
.cq_id
);
715 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
716 * @ccq: ccq sc struct
717 * @info: completion q entry to return
719 static enum i40iw_status_code
i40iw_sc_ccq_get_cqe_info(
720 struct i40iw_sc_cq
*ccq
,
721 struct i40iw_ccq_cqe_info
*info
)
723 u64 qp_ctx
, temp
, temp1
;
725 struct i40iw_sc_cqp
*cqp
;
728 enum i40iw_status_code ret_code
= 0;
730 if (ccq
->cq_uk
.avoid_mem_cflct
)
731 cqe
= (u64
*)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq
->cq_uk
);
733 cqe
= (u64
*)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq
->cq_uk
);
735 get_64bit_val(cqe
, 24, &temp
);
736 polarity
= (u8
)RS_64(temp
, I40IW_CQ_VALID
);
737 if (polarity
!= ccq
->cq_uk
.polarity
)
738 return I40IW_ERR_QUEUE_EMPTY
;
740 get_64bit_val(cqe
, 8, &qp_ctx
);
741 cqp
= (struct i40iw_sc_cqp
*)(unsigned long)qp_ctx
;
742 info
->error
= (bool)RS_64(temp
, I40IW_CQ_ERROR
);
743 info
->min_err_code
= (u16
)RS_64(temp
, I40IW_CQ_MINERR
);
745 info
->maj_err_code
= (u16
)RS_64(temp
, I40IW_CQ_MAJERR
);
746 info
->min_err_code
= (u16
)RS_64(temp
, I40IW_CQ_MINERR
);
748 wqe_idx
= (u32
)RS_64(temp
, I40IW_CQ_WQEIDX
);
749 info
->scratch
= cqp
->scratch_array
[wqe_idx
];
751 get_64bit_val(cqe
, 16, &temp1
);
752 info
->op_ret_val
= (u32
)RS_64(temp1
, I40IW_CCQ_OPRETVAL
);
753 get_64bit_val(cqp
->sq_base
[wqe_idx
].elem
, 24, &temp1
);
754 info
->op_code
= (u8
)RS_64(temp1
, I40IW_CQPSQ_OPCODE
);
757 /* move the head for cq */
758 I40IW_RING_MOVE_HEAD(ccq
->cq_uk
.cq_ring
, ret_code
);
759 if (I40IW_RING_GETCURRENT_HEAD(ccq
->cq_uk
.cq_ring
) == 0)
760 ccq
->cq_uk
.polarity
^= 1;
762 /* update cq tail in cq shadow memory also */
763 I40IW_RING_MOVE_TAIL(ccq
->cq_uk
.cq_ring
);
764 set_64bit_val(ccq
->cq_uk
.shadow_area
,
766 I40IW_RING_GETCURRENT_HEAD(ccq
->cq_uk
.cq_ring
));
767 wmb(); /* write shadow area before tail */
768 I40IW_RING_MOVE_TAIL(cqp
->sq_ring
);
769 ccq
->dev
->cqp_cmd_stats
[OP_COMPLETED_COMMANDS
]++;
775 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
776 * @cqp: struct for cqp hw
777 * @op_code: cqp opcode for completion
778 * @info: completion q entry to return
780 static enum i40iw_status_code
i40iw_sc_poll_for_cqp_op_done(
781 struct i40iw_sc_cqp
*cqp
,
783 struct i40iw_ccq_cqe_info
*compl_info
)
785 struct i40iw_ccq_cqe_info info
;
786 struct i40iw_sc_cq
*ccq
;
787 enum i40iw_status_code ret_code
= 0;
790 memset(&info
, 0, sizeof(info
));
793 if (cnt
++ > I40IW_DONE_COUNT
)
794 return I40IW_ERR_TIMEOUT
;
796 if (i40iw_sc_ccq_get_cqe_info(ccq
, &info
)) {
797 udelay(I40IW_SLEEP_COUNT
);
802 ret_code
= I40IW_ERR_CQP_COMPL_ERROR
;
805 /* check if opcode is cq create */
806 if (op_code
!= info
.op_code
) {
807 i40iw_debug(cqp
->dev
, I40IW_DEBUG_WQE
,
808 "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
809 __func__
, op_code
, info
.op_code
);
811 /* success, exit out of the loop */
812 if (op_code
== info
.op_code
)
817 memcpy(compl_info
, &info
, sizeof(*compl_info
));
823 * i40iw_sc_manage_push_page - Handle push page
824 * @cqp: struct for cqp hw
825 * @info: push page info
826 * @scratch: u64 saved to be used during cqp completion
827 * @post_sq: flag for cqp db to ring
829 static enum i40iw_status_code
i40iw_sc_manage_push_page(
830 struct i40iw_sc_cqp
*cqp
,
831 struct i40iw_cqp_manage_push_page_info
*info
,
838 if (info
->push_idx
>= I40IW_MAX_PUSH_PAGE_COUNT
)
839 return I40IW_ERR_INVALID_PUSH_PAGE_INDEX
;
841 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
843 return I40IW_ERR_RING_FULL
;
845 set_64bit_val(wqe
, 16, info
->qs_handle
);
847 header
= LS_64(info
->push_idx
, I40IW_CQPSQ_MPP_PPIDX
) |
848 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES
, I40IW_CQPSQ_OPCODE
) |
849 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
) |
850 LS_64(info
->free_page
, I40IW_CQPSQ_MPP_FREE_PAGE
);
852 i40iw_insert_wqe_hdr(wqe
, header
);
854 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "MANAGE_PUSH_PAGES WQE",
855 wqe
, I40IW_CQP_WQE_SIZE
* 8);
858 i40iw_sc_cqp_post_sq(cqp
);
863 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
864 * @cqp: struct for cqp hw
865 * @scratch: u64 saved to be used during cqp completion
866 * @vf_index: vf index for cqp
867 * @free_pm_fcn: function number
868 * @post_sq: flag for cqp db to ring
870 static enum i40iw_status_code
i40iw_sc_manage_hmc_pm_func_table(
871 struct i40iw_sc_cqp
*cqp
,
880 if (vf_index
>= I40IW_MAX_VF_PER_PF
)
881 return I40IW_ERR_INVALID_VF_ID
;
882 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
884 return I40IW_ERR_RING_FULL
;
886 header
= LS_64(vf_index
, I40IW_CQPSQ_MHMC_VFIDX
) |
887 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE
, I40IW_CQPSQ_OPCODE
) |
888 LS_64(free_pm_fcn
, I40IW_CQPSQ_MHMC_FREEPMFN
) |
889 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
891 i40iw_insert_wqe_hdr(wqe
, header
);
892 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "MANAGE_HMC_PM_FUNC_TABLE WQE",
893 wqe
, I40IW_CQP_WQE_SIZE
* 8);
895 i40iw_sc_cqp_post_sq(cqp
);
900 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
901 * @cqp: struct for cqp hw
902 * @scratch: u64 saved to be used during cqp completion
903 * @hmc_profile_type: type of profile to set
904 * @vf_num: vf number for profile
905 * @post_sq: flag for cqp db to ring
906 * @poll_registers: flag to poll register for cqp completion
908 static enum i40iw_status_code
i40iw_sc_set_hmc_resource_profile(
909 struct i40iw_sc_cqp
*cqp
,
912 u8 vf_num
, bool post_sq
,
917 u32 val
, tail
, error
;
918 enum i40iw_status_code ret_code
= 0;
920 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
922 return I40IW_ERR_RING_FULL
;
924 set_64bit_val(wqe
, 16,
925 (LS_64(hmc_profile_type
, I40IW_CQPSQ_SHMCRP_HMC_PROFILE
) |
926 LS_64(vf_num
, I40IW_CQPSQ_SHMCRP_VFNUM
)));
928 header
= LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE
, I40IW_CQPSQ_OPCODE
) |
929 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
931 i40iw_insert_wqe_hdr(wqe
, header
);
933 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "MANAGE_HMC_PM_FUNC_TABLE WQE",
934 wqe
, I40IW_CQP_WQE_SIZE
* 8);
936 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
938 return I40IW_ERR_CQP_COMPL_ERROR
;
941 i40iw_sc_cqp_post_sq(cqp
);
943 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, 1000000);
945 ret_code
= i40iw_sc_poll_for_cqp_op_done(cqp
,
946 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED
,
954 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
955 * @cqp: struct for cqp hw
957 static enum i40iw_status_code
i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp
*cqp
)
959 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE
, NULL
);
963 * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit
964 * @cqp: struct for cqp hw
966 static enum i40iw_status_code
i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp
*cqp
)
968 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_COMMIT_FPM_VALUES
, NULL
);
972 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
973 * @cqp: struct for cqp hw
974 * @scratch: u64 saved to be used during cqp completion
975 * @hmc_fn_id: hmc function id
976 * @commit_fpm_mem: Memory for fpm values
977 * @post_sq: flag for cqp db to ring
978 * @wait_type: poll ccq or cqp registers for cqp completion
980 static enum i40iw_status_code
i40iw_sc_commit_fpm_values(
981 struct i40iw_sc_cqp
*cqp
,
984 struct i40iw_dma_mem
*commit_fpm_mem
,
990 u32 tail
, val
, error
;
991 enum i40iw_status_code ret_code
= 0;
993 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
995 return I40IW_ERR_RING_FULL
;
997 set_64bit_val(wqe
, 16, hmc_fn_id
);
998 set_64bit_val(wqe
, 32, commit_fpm_mem
->pa
);
1000 header
= LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES
, I40IW_CQPSQ_OPCODE
) |
1001 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1003 i40iw_insert_wqe_hdr(wqe
, header
);
1005 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "COMMIT_FPM_VALUES WQE",
1006 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1008 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
1010 return I40IW_ERR_CQP_COMPL_ERROR
;
1013 i40iw_sc_cqp_post_sq(cqp
);
1015 if (wait_type
== I40IW_CQP_WAIT_POLL_REGS
)
1016 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, I40IW_DONE_COUNT
);
1017 else if (wait_type
== I40IW_CQP_WAIT_POLL_CQ
)
1018 ret_code
= i40iw_sc_commit_fpm_values_done(cqp
);
1025 * i40iw_sc_query_rdma_features_done - poll cqp for query features done
1026 * @cqp: struct for cqp hw
1028 static enum i40iw_status_code
1029 i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp
*cqp
)
1031 return i40iw_sc_poll_for_cqp_op_done(
1032 cqp
, I40IW_CQP_OP_QUERY_RDMA_FEATURES
, NULL
);
1036 * i40iw_sc_query_rdma_features - query rdma features
1037 * @cqp: struct for cqp hw
1038 * @feat_mem: holds PA for HW to use
1039 * @scratch: u64 saved to be used during cqp completion
1041 static enum i40iw_status_code
1042 i40iw_sc_query_rdma_features(struct i40iw_sc_cqp
*cqp
,
1043 struct i40iw_dma_mem
*feat_mem
, u64 scratch
)
1048 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1050 return I40IW_ERR_RING_FULL
;
1052 set_64bit_val(wqe
, 32, feat_mem
->pa
);
1054 header
= LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES
, I40IW_CQPSQ_OPCODE
) |
1055 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
) | feat_mem
->size
;
1057 i40iw_insert_wqe_hdr(wqe
, header
);
1059 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QUERY RDMA FEATURES WQE",
1060 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1062 i40iw_sc_cqp_post_sq(cqp
);
1068 * i40iw_get_rdma_features - get RDMA features
1069 * @dev: sc device struct
1071 enum i40iw_status_code
i40iw_get_rdma_features(struct i40iw_sc_dev
*dev
)
1073 enum i40iw_status_code ret_code
;
1074 struct i40iw_dma_mem feat_buf
;
1076 u16 byte_idx
, feat_type
, feat_cnt
;
1078 ret_code
= i40iw_allocate_dma_mem(dev
->hw
, &feat_buf
,
1079 I40IW_FEATURE_BUF_SIZE
,
1080 I40IW_FEATURE_BUF_ALIGNMENT
);
1083 return I40IW_ERR_NO_MEMORY
;
1085 ret_code
= i40iw_sc_query_rdma_features(dev
->cqp
, &feat_buf
, 0);
1087 ret_code
= i40iw_sc_query_rdma_features_done(dev
->cqp
);
1092 get_64bit_val(feat_buf
.va
, 0, &temp
);
1093 feat_cnt
= RS_64(temp
, I40IW_FEATURE_CNT
);
1094 if (feat_cnt
< I40IW_MAX_FEATURES
) {
1095 ret_code
= I40IW_ERR_INVALID_FEAT_CNT
;
1097 } else if (feat_cnt
> I40IW_MAX_FEATURES
) {
1098 i40iw_debug(dev
, I40IW_DEBUG_CQP
,
1099 "features buf size insufficient\n");
1102 for (byte_idx
= 0, feat_type
= 0; feat_type
< I40IW_MAX_FEATURES
;
1103 feat_type
++, byte_idx
+= 8) {
1104 get_64bit_val((u64
*)feat_buf
.va
, byte_idx
, &temp
);
1105 dev
->feature_info
[feat_type
] = RS_64(temp
, I40IW_FEATURE_INFO
);
1108 i40iw_free_dma_mem(dev
->hw
, &feat_buf
);
1114 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1115 * @cqp: struct for cqp hw
1117 static enum i40iw_status_code
i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp
*cqp
)
1119 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_QUERY_FPM_VALUES
, NULL
);
1123 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
1124 * @cqp: struct for cqp hw
1125 * @scratch: u64 saved to be used during cqp completion
1126 * @hmc_fn_id: hmc function id
1127 * @query_fpm_mem: memory for return fpm values
1128 * @post_sq: flag for cqp db to ring
1129 * @wait_type: poll ccq or cqp registers for cqp completion
1131 static enum i40iw_status_code
i40iw_sc_query_fpm_values(
1132 struct i40iw_sc_cqp
*cqp
,
1135 struct i40iw_dma_mem
*query_fpm_mem
,
1141 u32 tail
, val
, error
;
1142 enum i40iw_status_code ret_code
= 0;
1144 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1146 return I40IW_ERR_RING_FULL
;
1148 set_64bit_val(wqe
, 16, hmc_fn_id
);
1149 set_64bit_val(wqe
, 32, query_fpm_mem
->pa
);
1151 header
= LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES
, I40IW_CQPSQ_OPCODE
) |
1152 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1154 i40iw_insert_wqe_hdr(wqe
, header
);
1156 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QUERY_FPM WQE",
1157 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1159 /* read the tail from CQP_TAIL register */
1160 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
1163 return I40IW_ERR_CQP_COMPL_ERROR
;
1166 i40iw_sc_cqp_post_sq(cqp
);
1167 if (wait_type
== I40IW_CQP_WAIT_POLL_REGS
)
1168 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, I40IW_DONE_COUNT
);
1169 else if (wait_type
== I40IW_CQP_WAIT_POLL_CQ
)
1170 ret_code
= i40iw_sc_query_fpm_values_done(cqp
);
1177 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
1178 * @cqp: struct for cqp hw
1179 * @info: arp entry information
1180 * @scratch: u64 saved to be used during cqp completion
1181 * @post_sq: flag for cqp db to ring
1183 static enum i40iw_status_code
i40iw_sc_add_arp_cache_entry(
1184 struct i40iw_sc_cqp
*cqp
,
1185 struct i40iw_add_arp_cache_entry_info
*info
,
1192 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1194 return I40IW_ERR_RING_FULL
;
1195 set_64bit_val(wqe
, 8, info
->reach_max
);
1197 temp
= info
->mac_addr
[5] |
1198 LS_64_1(info
->mac_addr
[4], 8) |
1199 LS_64_1(info
->mac_addr
[3], 16) |
1200 LS_64_1(info
->mac_addr
[2], 24) |
1201 LS_64_1(info
->mac_addr
[1], 32) |
1202 LS_64_1(info
->mac_addr
[0], 40);
1204 set_64bit_val(wqe
, 16, temp
);
1206 header
= info
->arp_index
|
1207 LS_64(I40IW_CQP_OP_MANAGE_ARP
, I40IW_CQPSQ_OPCODE
) |
1208 LS_64((info
->permanent
? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT
) |
1209 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID
) |
1210 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1212 i40iw_insert_wqe_hdr(wqe
, header
);
1214 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "ARP_CACHE_ENTRY WQE",
1215 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1218 i40iw_sc_cqp_post_sq(cqp
);
1223 * i40iw_sc_del_arp_cache_entry - dele arp cache entry
1224 * @cqp: struct for cqp hw
1225 * @scratch: u64 saved to be used during cqp completion
1226 * @arp_index: arp index to delete arp entry
1227 * @post_sq: flag for cqp db to ring
1229 static enum i40iw_status_code
i40iw_sc_del_arp_cache_entry(
1230 struct i40iw_sc_cqp
*cqp
,
1238 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1240 return I40IW_ERR_RING_FULL
;
1242 header
= arp_index
|
1243 LS_64(I40IW_CQP_OP_MANAGE_ARP
, I40IW_CQPSQ_OPCODE
) |
1244 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1245 i40iw_insert_wqe_hdr(wqe
, header
);
1247 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "ARP_CACHE_DEL_ENTRY WQE",
1248 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1251 i40iw_sc_cqp_post_sq(cqp
);
1256 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
1257 * @cqp: struct for cqp hw
1258 * @scratch: u64 saved to be used during cqp completion
1259 * @arp_index: arp index to delete arp entry
1260 * @post_sq: flag for cqp db to ring
1262 static enum i40iw_status_code
i40iw_sc_query_arp_cache_entry(
1263 struct i40iw_sc_cqp
*cqp
,
1271 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1273 return I40IW_ERR_RING_FULL
;
1275 header
= arp_index
|
1276 LS_64(I40IW_CQP_OP_MANAGE_ARP
, I40IW_CQPSQ_OPCODE
) |
1277 LS_64(1, I40IW_CQPSQ_MAT_QUERY
) |
1278 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1280 i40iw_insert_wqe_hdr(wqe
, header
);
1282 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QUERY_ARP_CACHE_ENTRY WQE",
1283 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1286 i40iw_sc_cqp_post_sq(cqp
);
1291 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
1292 * @cqp: struct for cqp hw
1293 * @info: info for apbvt entry to add or delete
1294 * @scratch: u64 saved to be used during cqp completion
1295 * @post_sq: flag for cqp db to ring
1297 static enum i40iw_status_code
i40iw_sc_manage_apbvt_entry(
1298 struct i40iw_sc_cqp
*cqp
,
1299 struct i40iw_apbvt_info
*info
,
1306 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1308 return I40IW_ERR_RING_FULL
;
1310 set_64bit_val(wqe
, 16, info
->port
);
1312 header
= LS_64(I40IW_CQP_OP_MANAGE_APBVT
, I40IW_CQPSQ_OPCODE
) |
1313 LS_64(info
->add
, I40IW_CQPSQ_MAPT_ADDPORT
) |
1314 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1316 i40iw_insert_wqe_hdr(wqe
, header
);
1318 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "MANAGE_APBVT WQE",
1319 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1322 i40iw_sc_cqp_post_sq(cqp
);
1327 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
1328 * @cqp: struct for cqp hw
1329 * @info: info for quad hash to manage
1330 * @scratch: u64 saved to be used during cqp completion
1331 * @post_sq: flag for cqp db to ring
1333 * This is called before connection establishment is started. For passive connections, when
1334 * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
1335 * ip address and tcp port. When SYN is received (passive connections) or
1336 * sent (active connections), this routine is called with entry type of
1337 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
1339 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
1340 * the hardware will point to iwarp's qp number and requires no calls from the driver.
1342 static enum i40iw_status_code
i40iw_sc_manage_qhash_table_entry(
1343 struct i40iw_sc_cqp
*cqp
,
1344 struct i40iw_qhash_table_info
*info
,
1352 struct i40iw_sc_vsi
*vsi
= info
->vsi
;
1354 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1356 return I40IW_ERR_RING_FULL
;
1358 temp
= info
->mac_addr
[5] |
1359 LS_64_1(info
->mac_addr
[4], 8) |
1360 LS_64_1(info
->mac_addr
[3], 16) |
1361 LS_64_1(info
->mac_addr
[2], 24) |
1362 LS_64_1(info
->mac_addr
[1], 32) |
1363 LS_64_1(info
->mac_addr
[0], 40);
1365 set_64bit_val(wqe
, 0, temp
);
1367 qw1
= LS_64(info
->qp_num
, I40IW_CQPSQ_QHASH_QPN
) |
1368 LS_64(info
->dest_port
, I40IW_CQPSQ_QHASH_DEST_PORT
);
1369 if (info
->ipv4_valid
) {
1372 LS_64(info
->dest_ip
[0], I40IW_CQPSQ_QHASH_ADDR3
));
1376 LS_64(info
->dest_ip
[0], I40IW_CQPSQ_QHASH_ADDR0
) |
1377 LS_64(info
->dest_ip
[1], I40IW_CQPSQ_QHASH_ADDR1
));
1381 LS_64(info
->dest_ip
[2], I40IW_CQPSQ_QHASH_ADDR2
) |
1382 LS_64(info
->dest_ip
[3], I40IW_CQPSQ_QHASH_ADDR3
));
1384 qw2
= LS_64(vsi
->qos
[info
->user_pri
].qs_handle
, I40IW_CQPSQ_QHASH_QS_HANDLE
);
1385 if (info
->vlan_valid
)
1386 qw2
|= LS_64(info
->vlan_id
, I40IW_CQPSQ_QHASH_VLANID
);
1387 set_64bit_val(wqe
, 16, qw2
);
1388 if (info
->entry_type
== I40IW_QHASH_TYPE_TCP_ESTABLISHED
) {
1389 qw1
|= LS_64(info
->src_port
, I40IW_CQPSQ_QHASH_SRC_PORT
);
1390 if (!info
->ipv4_valid
) {
1393 LS_64(info
->src_ip
[0], I40IW_CQPSQ_QHASH_ADDR0
) |
1394 LS_64(info
->src_ip
[1], I40IW_CQPSQ_QHASH_ADDR1
));
1397 LS_64(info
->src_ip
[2], I40IW_CQPSQ_QHASH_ADDR2
) |
1398 LS_64(info
->src_ip
[3], I40IW_CQPSQ_QHASH_ADDR3
));
1402 LS_64(info
->src_ip
[0], I40IW_CQPSQ_QHASH_ADDR3
));
1406 set_64bit_val(wqe
, 8, qw1
);
1407 temp
= LS_64(cqp
->polarity
, I40IW_CQPSQ_QHASH_WQEVALID
) |
1408 LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY
, I40IW_CQPSQ_QHASH_OPCODE
) |
1409 LS_64(info
->manage
, I40IW_CQPSQ_QHASH_MANAGE
) |
1410 LS_64(info
->ipv4_valid
, I40IW_CQPSQ_QHASH_IPV4VALID
) |
1411 LS_64(info
->vlan_valid
, I40IW_CQPSQ_QHASH_VLANVALID
) |
1412 LS_64(info
->entry_type
, I40IW_CQPSQ_QHASH_ENTRYTYPE
);
1414 i40iw_insert_wqe_hdr(wqe
, temp
);
1416 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "MANAGE_QHASH WQE",
1417 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1420 i40iw_sc_cqp_post_sq(cqp
);
1425 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1426 * @cqp: struct for cqp hw
1427 * @scratch: u64 saved to be used during cqp completion
1428 * @post_sq: flag for cqp db to ring
1430 static enum i40iw_status_code
i40iw_sc_alloc_local_mac_ipaddr_entry(
1431 struct i40iw_sc_cqp
*cqp
,
1438 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1440 return I40IW_ERR_RING_FULL
;
1441 header
= LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY
, I40IW_CQPSQ_OPCODE
) |
1442 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1444 i40iw_insert_wqe_hdr(wqe
, header
);
1445 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1446 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1448 i40iw_sc_cqp_post_sq(cqp
);
1453 * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry
1454 * @cqp: struct for cqp hw
1455 * @info:mac addr info
1456 * @scratch: u64 saved to be used during cqp completion
1457 * @post_sq: flag for cqp db to ring
1459 static enum i40iw_status_code
i40iw_sc_add_local_mac_ipaddr_entry(
1460 struct i40iw_sc_cqp
*cqp
,
1461 struct i40iw_local_mac_ipaddr_entry_info
*info
,
1468 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1470 return I40IW_ERR_RING_FULL
;
1471 temp
= info
->mac_addr
[5] |
1472 LS_64_1(info
->mac_addr
[4], 8) |
1473 LS_64_1(info
->mac_addr
[3], 16) |
1474 LS_64_1(info
->mac_addr
[2], 24) |
1475 LS_64_1(info
->mac_addr
[1], 32) |
1476 LS_64_1(info
->mac_addr
[0], 40);
1478 set_64bit_val(wqe
, 32, temp
);
1480 header
= LS_64(info
->entry_idx
, I40IW_CQPSQ_MLIPA_IPTABLEIDX
) |
1481 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE
, I40IW_CQPSQ_OPCODE
) |
1482 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1484 i40iw_insert_wqe_hdr(wqe
, header
);
1486 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "ADD_LOCAL_MAC_IPADDR WQE",
1487 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1490 i40iw_sc_cqp_post_sq(cqp
);
1495 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac
1496 * @cqp: struct for cqp hw
1497 * @scratch: u64 saved to be used during cqp completion
1498 * @entry_idx: index of mac entry
1499 * @ ignore_ref_count: to force mac adde delete
1500 * @post_sq: flag for cqp db to ring
1502 static enum i40iw_status_code
i40iw_sc_del_local_mac_ipaddr_entry(
1503 struct i40iw_sc_cqp
*cqp
,
1506 u8 ignore_ref_count
,
1512 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1514 return I40IW_ERR_RING_FULL
;
1515 header
= LS_64(entry_idx
, I40IW_CQPSQ_MLIPA_IPTABLEIDX
) |
1516 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE
, I40IW_CQPSQ_OPCODE
) |
1517 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY
) |
1518 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
) |
1519 LS_64(ignore_ref_count
, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT
);
1521 i40iw_insert_wqe_hdr(wqe
, header
);
1523 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "DEL_LOCAL_MAC_IPADDR WQE",
1524 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1527 i40iw_sc_cqp_post_sq(cqp
);
1532 * i40iw_sc_cqp_nop - send a nop wqe
1533 * @cqp: struct for cqp hw
1534 * @scratch: u64 saved to be used during cqp completion
1535 * @post_sq: flag for cqp db to ring
1537 static enum i40iw_status_code
i40iw_sc_cqp_nop(struct i40iw_sc_cqp
*cqp
,
1544 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1546 return I40IW_ERR_RING_FULL
;
1547 header
= LS_64(I40IW_CQP_OP_NOP
, I40IW_CQPSQ_OPCODE
) |
1548 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1549 i40iw_insert_wqe_hdr(wqe
, header
);
1550 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "NOP WQE",
1551 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1554 i40iw_sc_cqp_post_sq(cqp
);
1559 * i40iw_sc_ceq_init - initialize ceq
1560 * @ceq: ceq sc structure
1561 * @info: ceq initialization info
1563 static enum i40iw_status_code
i40iw_sc_ceq_init(struct i40iw_sc_ceq
*ceq
,
1564 struct i40iw_ceq_init_info
*info
)
1568 if ((info
->elem_cnt
< I40IW_MIN_CEQ_ENTRIES
) ||
1569 (info
->elem_cnt
> I40IW_MAX_CEQ_ENTRIES
))
1570 return I40IW_ERR_INVALID_SIZE
;
1572 if (info
->ceq_id
>= I40IW_MAX_CEQID
)
1573 return I40IW_ERR_INVALID_CEQ_ID
;
1575 pble_obj_cnt
= info
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
1577 if (info
->virtual_map
&& (info
->first_pm_pbl_idx
>= pble_obj_cnt
))
1578 return I40IW_ERR_INVALID_PBLE_INDEX
;
1580 ceq
->size
= sizeof(*ceq
);
1581 ceq
->ceqe_base
= (struct i40iw_ceqe
*)info
->ceqe_base
;
1582 ceq
->ceq_id
= info
->ceq_id
;
1583 ceq
->dev
= info
->dev
;
1584 ceq
->elem_cnt
= info
->elem_cnt
;
1585 ceq
->ceq_elem_pa
= info
->ceqe_pa
;
1586 ceq
->virtual_map
= info
->virtual_map
;
1588 ceq
->pbl_chunk_size
= (ceq
->virtual_map
? info
->pbl_chunk_size
: 0);
1589 ceq
->first_pm_pbl_idx
= (ceq
->virtual_map
? info
->first_pm_pbl_idx
: 0);
1590 ceq
->pbl_list
= (ceq
->virtual_map
? info
->pbl_list
: NULL
);
1592 ceq
->tph_en
= info
->tph_en
;
1593 ceq
->tph_val
= info
->tph_val
;
1595 I40IW_RING_INIT(ceq
->ceq_ring
, ceq
->elem_cnt
);
1596 ceq
->dev
->ceq
[info
->ceq_id
] = ceq
;
1602 * i40iw_sc_ceq_create - create ceq wqe
1603 * @ceq: ceq sc structure
1604 * @scratch: u64 saved to be used during cqp completion
1605 * @post_sq: flag for cqp db to ring
1607 static enum i40iw_status_code
i40iw_sc_ceq_create(struct i40iw_sc_ceq
*ceq
,
1611 struct i40iw_sc_cqp
*cqp
;
1615 cqp
= ceq
->dev
->cqp
;
1616 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1618 return I40IW_ERR_RING_FULL
;
1619 set_64bit_val(wqe
, 16, ceq
->elem_cnt
);
1620 set_64bit_val(wqe
, 32, (ceq
->virtual_map
? 0 : ceq
->ceq_elem_pa
));
1621 set_64bit_val(wqe
, 48, (ceq
->virtual_map
? ceq
->first_pm_pbl_idx
: 0));
1622 set_64bit_val(wqe
, 56, LS_64(ceq
->tph_val
, I40IW_CQPSQ_TPHVAL
));
1624 header
= ceq
->ceq_id
|
1625 LS_64(I40IW_CQP_OP_CREATE_CEQ
, I40IW_CQPSQ_OPCODE
) |
1626 LS_64(ceq
->pbl_chunk_size
, I40IW_CQPSQ_CEQ_LPBLSIZE
) |
1627 LS_64(ceq
->virtual_map
, I40IW_CQPSQ_CEQ_VMAP
) |
1628 LS_64(ceq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
1629 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1631 i40iw_insert_wqe_hdr(wqe
, header
);
1633 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CEQ_CREATE WQE",
1634 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1637 i40iw_sc_cqp_post_sq(cqp
);
1642 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1643 * @ceq: ceq sc structure
1645 static enum i40iw_status_code
i40iw_sc_cceq_create_done(struct i40iw_sc_ceq
*ceq
)
1647 struct i40iw_sc_cqp
*cqp
;
1649 cqp
= ceq
->dev
->cqp
;
1650 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_CREATE_CEQ
, NULL
);
1654 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1655 * @ceq: ceq sc structure
1657 static enum i40iw_status_code
i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq
*ceq
)
1659 struct i40iw_sc_cqp
*cqp
;
1661 cqp
= ceq
->dev
->cqp
;
1662 cqp
->process_cqp_sds
= i40iw_update_sds_noccq
;
1663 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_DESTROY_CEQ
, NULL
);
1667 * i40iw_sc_cceq_create - create cceq
1668 * @ceq: ceq sc structure
1669 * @scratch: u64 saved to be used during cqp completion
1671 static enum i40iw_status_code
i40iw_sc_cceq_create(struct i40iw_sc_ceq
*ceq
, u64 scratch
)
1673 enum i40iw_status_code ret_code
;
1675 ret_code
= i40iw_sc_ceq_create(ceq
, scratch
, true);
1677 ret_code
= i40iw_sc_cceq_create_done(ceq
);
1682 * i40iw_sc_ceq_destroy - destroy ceq
1683 * @ceq: ceq sc structure
1684 * @scratch: u64 saved to be used during cqp completion
1685 * @post_sq: flag for cqp db to ring
1687 static enum i40iw_status_code
i40iw_sc_ceq_destroy(struct i40iw_sc_ceq
*ceq
,
1691 struct i40iw_sc_cqp
*cqp
;
1695 cqp
= ceq
->dev
->cqp
;
1696 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1698 return I40IW_ERR_RING_FULL
;
1699 set_64bit_val(wqe
, 16, ceq
->elem_cnt
);
1700 set_64bit_val(wqe
, 48, ceq
->first_pm_pbl_idx
);
1701 header
= ceq
->ceq_id
|
1702 LS_64(I40IW_CQP_OP_DESTROY_CEQ
, I40IW_CQPSQ_OPCODE
) |
1703 LS_64(ceq
->pbl_chunk_size
, I40IW_CQPSQ_CEQ_LPBLSIZE
) |
1704 LS_64(ceq
->virtual_map
, I40IW_CQPSQ_CEQ_VMAP
) |
1705 LS_64(ceq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
1706 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1707 i40iw_insert_wqe_hdr(wqe
, header
);
1708 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CEQ_DESTROY WQE",
1709 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1712 i40iw_sc_cqp_post_sq(cqp
);
1717 * i40iw_sc_process_ceq - process ceq
1718 * @dev: sc device struct
1719 * @ceq: ceq sc structure
1721 static void *i40iw_sc_process_ceq(struct i40iw_sc_dev
*dev
, struct i40iw_sc_ceq
*ceq
)
1725 struct i40iw_sc_cq
*cq
= NULL
;
1728 ceqe
= (u64
*)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq
);
1729 get_64bit_val(ceqe
, 0, &temp
);
1730 polarity
= (u8
)RS_64(temp
, I40IW_CEQE_VALID
);
1731 if (polarity
!= ceq
->polarity
)
1734 cq
= (struct i40iw_sc_cq
*)(unsigned long)LS_64_1(temp
, 1);
1736 I40IW_RING_MOVE_TAIL(ceq
->ceq_ring
);
1737 if (I40IW_RING_GETCURRENT_TAIL(ceq
->ceq_ring
) == 0)
1741 i40iw_wr32(dev
->hw
, I40E_PFPE_CQACK
, cq
->cq_uk
.cq_id
);
1743 i40iw_wr32(dev
->hw
, I40E_VFPE_CQACK1
, cq
->cq_uk
.cq_id
);
1749 * i40iw_sc_aeq_init - initialize aeq
1750 * @aeq: aeq structure ptr
1751 * @info: aeq initialization info
1753 static enum i40iw_status_code
i40iw_sc_aeq_init(struct i40iw_sc_aeq
*aeq
,
1754 struct i40iw_aeq_init_info
*info
)
1758 if ((info
->elem_cnt
< I40IW_MIN_AEQ_ENTRIES
) ||
1759 (info
->elem_cnt
> I40IW_MAX_AEQ_ENTRIES
))
1760 return I40IW_ERR_INVALID_SIZE
;
1761 pble_obj_cnt
= info
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
1763 if (info
->virtual_map
&& (info
->first_pm_pbl_idx
>= pble_obj_cnt
))
1764 return I40IW_ERR_INVALID_PBLE_INDEX
;
1766 aeq
->size
= sizeof(*aeq
);
1768 aeq
->aeqe_base
= (struct i40iw_sc_aeqe
*)info
->aeqe_base
;
1769 aeq
->dev
= info
->dev
;
1770 aeq
->elem_cnt
= info
->elem_cnt
;
1772 aeq
->aeq_elem_pa
= info
->aeq_elem_pa
;
1773 I40IW_RING_INIT(aeq
->aeq_ring
, aeq
->elem_cnt
);
1774 info
->dev
->aeq
= aeq
;
1776 aeq
->virtual_map
= info
->virtual_map
;
1777 aeq
->pbl_list
= (aeq
->virtual_map
? info
->pbl_list
: NULL
);
1778 aeq
->pbl_chunk_size
= (aeq
->virtual_map
? info
->pbl_chunk_size
: 0);
1779 aeq
->first_pm_pbl_idx
= (aeq
->virtual_map
? info
->first_pm_pbl_idx
: 0);
1780 info
->dev
->aeq
= aeq
;
1785 * i40iw_sc_aeq_create - create aeq
1786 * @aeq: aeq structure ptr
1787 * @scratch: u64 saved to be used during cqp completion
1788 * @post_sq: flag for cqp db to ring
1790 static enum i40iw_status_code
i40iw_sc_aeq_create(struct i40iw_sc_aeq
*aeq
,
1795 struct i40iw_sc_cqp
*cqp
;
1798 cqp
= aeq
->dev
->cqp
;
1799 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1801 return I40IW_ERR_RING_FULL
;
1802 set_64bit_val(wqe
, 16, aeq
->elem_cnt
);
1803 set_64bit_val(wqe
, 32,
1804 (aeq
->virtual_map
? 0 : aeq
->aeq_elem_pa
));
1805 set_64bit_val(wqe
, 48,
1806 (aeq
->virtual_map
? aeq
->first_pm_pbl_idx
: 0));
1808 header
= LS_64(I40IW_CQP_OP_CREATE_AEQ
, I40IW_CQPSQ_OPCODE
) |
1809 LS_64(aeq
->pbl_chunk_size
, I40IW_CQPSQ_AEQ_LPBLSIZE
) |
1810 LS_64(aeq
->virtual_map
, I40IW_CQPSQ_AEQ_VMAP
) |
1811 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1813 i40iw_insert_wqe_hdr(wqe
, header
);
1814 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "AEQ_CREATE WQE",
1815 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1817 i40iw_sc_cqp_post_sq(cqp
);
1822 * i40iw_sc_aeq_destroy - destroy aeq during close
1823 * @aeq: aeq structure ptr
1824 * @scratch: u64 saved to be used during cqp completion
1825 * @post_sq: flag for cqp db to ring
1827 static enum i40iw_status_code
i40iw_sc_aeq_destroy(struct i40iw_sc_aeq
*aeq
,
1832 struct i40iw_sc_cqp
*cqp
;
1835 cqp
= aeq
->dev
->cqp
;
1836 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
1838 return I40IW_ERR_RING_FULL
;
1839 set_64bit_val(wqe
, 16, aeq
->elem_cnt
);
1840 set_64bit_val(wqe
, 48, aeq
->first_pm_pbl_idx
);
1841 header
= LS_64(I40IW_CQP_OP_DESTROY_AEQ
, I40IW_CQPSQ_OPCODE
) |
1842 LS_64(aeq
->pbl_chunk_size
, I40IW_CQPSQ_AEQ_LPBLSIZE
) |
1843 LS_64(aeq
->virtual_map
, I40IW_CQPSQ_AEQ_VMAP
) |
1844 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
1845 i40iw_insert_wqe_hdr(wqe
, header
);
1847 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "AEQ_DESTROY WQE",
1848 wqe
, I40IW_CQP_WQE_SIZE
* 8);
1850 i40iw_sc_cqp_post_sq(cqp
);
1855 * i40iw_sc_get_next_aeqe - get next aeq entry
1856 * @aeq: aeq structure ptr
1857 * @info: aeqe info to be returned
1859 static enum i40iw_status_code
i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq
*aeq
,
1860 struct i40iw_aeqe_info
*info
)
1862 u64 temp
, compl_ctx
;
1868 aeqe
= (u64
*)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq
);
1869 get_64bit_val(aeqe
, 0, &compl_ctx
);
1870 get_64bit_val(aeqe
, 8, &temp
);
1871 polarity
= (u8
)RS_64(temp
, I40IW_AEQE_VALID
);
1873 if (aeq
->polarity
!= polarity
)
1874 return I40IW_ERR_QUEUE_EMPTY
;
1876 i40iw_debug_buf(aeq
->dev
, I40IW_DEBUG_WQE
, "AEQ_ENTRY", aeqe
, 16);
1878 ae_src
= (u8
)RS_64(temp
, I40IW_AEQE_AESRC
);
1879 wqe_idx
= (u16
)RS_64(temp
, I40IW_AEQE_WQDESCIDX
);
1880 info
->qp_cq_id
= (u32
)RS_64(temp
, I40IW_AEQE_QPCQID
);
1881 info
->ae_id
= (u16
)RS_64(temp
, I40IW_AEQE_AECODE
);
1882 info
->tcp_state
= (u8
)RS_64(temp
, I40IW_AEQE_TCPSTATE
);
1883 info
->iwarp_state
= (u8
)RS_64(temp
, I40IW_AEQE_IWSTATE
);
1884 info
->q2_data_written
= (u8
)RS_64(temp
, I40IW_AEQE_Q2DATA
);
1885 info
->aeqe_overflow
= (bool)RS_64(temp
, I40IW_AEQE_OVERFLOW
);
1887 switch (info
->ae_id
) {
1888 case I40IW_AE_PRIV_OPERATION_DENIED
:
1889 case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG
:
1890 case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT
:
1891 case I40IW_AE_BAD_CLOSE
:
1892 case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE
:
1893 case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO
:
1894 case I40IW_AE_STAG_ZERO_INVALID
:
1895 case I40IW_AE_IB_RREQ_AND_Q1_FULL
:
1896 case I40IW_AE_WQE_UNEXPECTED_OPCODE
:
1897 case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION
:
1898 case I40IW_AE_DDP_UBE_INVALID_MO
:
1899 case I40IW_AE_DDP_UBE_INVALID_QN
:
1900 case I40IW_AE_DDP_NO_L_BIT
:
1901 case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION
:
1902 case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE
:
1903 case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST
:
1904 case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP
:
1905 case I40IW_AE_INVALID_ARP_ENTRY
:
1906 case I40IW_AE_INVALID_TCP_OPTION_RCVD
:
1907 case I40IW_AE_STALE_ARP_ENTRY
:
1908 case I40IW_AE_LLP_CLOSE_COMPLETE
:
1909 case I40IW_AE_LLP_CONNECTION_RESET
:
1910 case I40IW_AE_LLP_FIN_RECEIVED
:
1911 case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR
:
1912 case I40IW_AE_LLP_SEGMENT_TOO_SMALL
:
1913 case I40IW_AE_LLP_SYN_RECEIVED
:
1914 case I40IW_AE_LLP_TERMINATE_RECEIVED
:
1915 case I40IW_AE_LLP_TOO_MANY_RETRIES
:
1916 case I40IW_AE_LLP_DOUBT_REACHABILITY
:
1917 case I40IW_AE_RESET_SENT
:
1918 case I40IW_AE_TERMINATE_SENT
:
1919 case I40IW_AE_RESET_NOT_SENT
:
1920 case I40IW_AE_LCE_QP_CATASTROPHIC
:
1921 case I40IW_AE_QP_SUSPEND_COMPLETE
:
1923 info
->compl_ctx
= compl_ctx
;
1924 ae_src
= I40IW_AE_SOURCE_RSVD
;
1926 case I40IW_AE_LCE_CQ_CATASTROPHIC
:
1928 info
->compl_ctx
= LS_64_1(compl_ctx
, 1);
1929 ae_src
= I40IW_AE_SOURCE_RSVD
;
1934 case I40IW_AE_SOURCE_RQ
:
1935 case I40IW_AE_SOURCE_RQ_0011
:
1937 info
->wqe_idx
= wqe_idx
;
1938 info
->compl_ctx
= compl_ctx
;
1940 case I40IW_AE_SOURCE_CQ
:
1941 case I40IW_AE_SOURCE_CQ_0110
:
1942 case I40IW_AE_SOURCE_CQ_1010
:
1943 case I40IW_AE_SOURCE_CQ_1110
:
1945 info
->compl_ctx
= LS_64_1(compl_ctx
, 1);
1947 case I40IW_AE_SOURCE_SQ
:
1948 case I40IW_AE_SOURCE_SQ_0111
:
1951 info
->wqe_idx
= wqe_idx
;
1952 info
->compl_ctx
= compl_ctx
;
1954 case I40IW_AE_SOURCE_IN_RR_WR
:
1955 case I40IW_AE_SOURCE_IN_RR_WR_1011
:
1957 info
->compl_ctx
= compl_ctx
;
1958 info
->in_rdrsp_wr
= true;
1960 case I40IW_AE_SOURCE_OUT_RR
:
1961 case I40IW_AE_SOURCE_OUT_RR_1111
:
1963 info
->compl_ctx
= compl_ctx
;
1964 info
->out_rdrsp
= true;
1966 case I40IW_AE_SOURCE_RSVD
:
1971 I40IW_RING_MOVE_TAIL(aeq
->aeq_ring
);
1972 if (I40IW_RING_GETCURRENT_TAIL(aeq
->aeq_ring
) == 0)
1978 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1979 * @dev: sc device struct
1980 * @count: allocate count
1982 static enum i40iw_status_code
i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev
*dev
,
1987 i40iw_wr32(dev
->hw
, I40E_PFPE_AEQALLOC
, count
);
1989 i40iw_wr32(dev
->hw
, I40E_VFPE_AEQALLOC1
, count
);
1995 * i40iw_sc_aeq_create_done - create aeq
1996 * @aeq: aeq structure ptr
1998 static enum i40iw_status_code
i40iw_sc_aeq_create_done(struct i40iw_sc_aeq
*aeq
)
2000 struct i40iw_sc_cqp
*cqp
;
2002 cqp
= aeq
->dev
->cqp
;
2003 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_CREATE_AEQ
, NULL
);
2007 * i40iw_sc_aeq_destroy_done - destroy of aeq during close
2008 * @aeq: aeq structure ptr
2010 static enum i40iw_status_code
i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq
*aeq
)
2012 struct i40iw_sc_cqp
*cqp
;
2014 cqp
= aeq
->dev
->cqp
;
2015 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_DESTROY_AEQ
, NULL
);
2019 * i40iw_sc_ccq_init - initialize control cq
2020 * @cq: sc's cq ctruct
2021 * @info: info for control cq initialization
2023 static enum i40iw_status_code
i40iw_sc_ccq_init(struct i40iw_sc_cq
*cq
,
2024 struct i40iw_ccq_init_info
*info
)
2028 if (info
->num_elem
< I40IW_MIN_CQ_SIZE
|| info
->num_elem
> I40IW_MAX_CQ_SIZE
)
2029 return I40IW_ERR_INVALID_SIZE
;
2031 if (info
->ceq_id
> I40IW_MAX_CEQID
)
2032 return I40IW_ERR_INVALID_CEQ_ID
;
2034 pble_obj_cnt
= info
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
2036 if (info
->virtual_map
&& (info
->first_pm_pbl_idx
>= pble_obj_cnt
))
2037 return I40IW_ERR_INVALID_PBLE_INDEX
;
2039 cq
->cq_pa
= info
->cq_pa
;
2040 cq
->cq_uk
.cq_base
= info
->cq_base
;
2041 cq
->shadow_area_pa
= info
->shadow_area_pa
;
2042 cq
->cq_uk
.shadow_area
= info
->shadow_area
;
2043 cq
->shadow_read_threshold
= info
->shadow_read_threshold
;
2044 cq
->dev
= info
->dev
;
2045 cq
->ceq_id
= info
->ceq_id
;
2046 cq
->cq_uk
.cq_size
= info
->num_elem
;
2047 cq
->cq_type
= I40IW_CQ_TYPE_CQP
;
2048 cq
->ceqe_mask
= info
->ceqe_mask
;
2049 I40IW_RING_INIT(cq
->cq_uk
.cq_ring
, info
->num_elem
);
2051 cq
->cq_uk
.cq_id
= 0; /* control cq is id 0 always */
2052 cq
->ceq_id_valid
= info
->ceq_id_valid
;
2053 cq
->tph_en
= info
->tph_en
;
2054 cq
->tph_val
= info
->tph_val
;
2055 cq
->cq_uk
.avoid_mem_cflct
= info
->avoid_mem_cflct
;
2057 cq
->pbl_list
= info
->pbl_list
;
2058 cq
->virtual_map
= info
->virtual_map
;
2059 cq
->pbl_chunk_size
= info
->pbl_chunk_size
;
2060 cq
->first_pm_pbl_idx
= info
->first_pm_pbl_idx
;
2061 cq
->cq_uk
.polarity
= true;
2063 /* following are only for iw cqs so initialize them to zero */
2064 cq
->cq_uk
.cqe_alloc_reg
= NULL
;
2065 info
->dev
->ccq
= cq
;
2070 * i40iw_sc_ccq_create_done - poll cqp for ccq create
2071 * @ccq: ccq sc struct
2073 static enum i40iw_status_code
i40iw_sc_ccq_create_done(struct i40iw_sc_cq
*ccq
)
2075 struct i40iw_sc_cqp
*cqp
;
2077 cqp
= ccq
->dev
->cqp
;
2078 return i40iw_sc_poll_for_cqp_op_done(cqp
, I40IW_CQP_OP_CREATE_CQ
, NULL
);
2082 * i40iw_sc_ccq_create - create control cq
2083 * @ccq: ccq sc struct
2084 * @scratch: u64 saved to be used during cqp completion
2085 * @check_overflow: overlow flag for ccq
2086 * @post_sq: flag for cqp db to ring
2088 static enum i40iw_status_code
i40iw_sc_ccq_create(struct i40iw_sc_cq
*ccq
,
2090 bool check_overflow
,
2094 struct i40iw_sc_cqp
*cqp
;
2096 enum i40iw_status_code ret_code
;
2098 cqp
= ccq
->dev
->cqp
;
2099 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2101 return I40IW_ERR_RING_FULL
;
2102 set_64bit_val(wqe
, 0, ccq
->cq_uk
.cq_size
);
2103 set_64bit_val(wqe
, 8, RS_64_1(ccq
, 1));
2104 set_64bit_val(wqe
, 16,
2105 LS_64(ccq
->shadow_read_threshold
, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD
));
2106 set_64bit_val(wqe
, 32, (ccq
->virtual_map
? 0 : ccq
->cq_pa
));
2107 set_64bit_val(wqe
, 40, ccq
->shadow_area_pa
);
2108 set_64bit_val(wqe
, 48,
2109 (ccq
->virtual_map
? ccq
->first_pm_pbl_idx
: 0));
2110 set_64bit_val(wqe
, 56,
2111 LS_64(ccq
->tph_val
, I40IW_CQPSQ_TPHVAL
));
2113 header
= ccq
->cq_uk
.cq_id
|
2114 LS_64((ccq
->ceq_id_valid
? ccq
->ceq_id
: 0), I40IW_CQPSQ_CQ_CEQID
) |
2115 LS_64(I40IW_CQP_OP_CREATE_CQ
, I40IW_CQPSQ_OPCODE
) |
2116 LS_64(ccq
->pbl_chunk_size
, I40IW_CQPSQ_CQ_LPBLSIZE
) |
2117 LS_64(check_overflow
, I40IW_CQPSQ_CQ_CHKOVERFLOW
) |
2118 LS_64(ccq
->virtual_map
, I40IW_CQPSQ_CQ_VIRTMAP
) |
2119 LS_64(ccq
->ceqe_mask
, I40IW_CQPSQ_CQ_ENCEQEMASK
) |
2120 LS_64(ccq
->ceq_id_valid
, I40IW_CQPSQ_CQ_CEQIDVALID
) |
2121 LS_64(ccq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
2122 LS_64(ccq
->cq_uk
.avoid_mem_cflct
, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT
) |
2123 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2125 i40iw_insert_wqe_hdr(wqe
, header
);
2127 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CCQ_CREATE WQE",
2128 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2131 i40iw_sc_cqp_post_sq(cqp
);
2132 ret_code
= i40iw_sc_ccq_create_done(ccq
);
2136 cqp
->process_cqp_sds
= i40iw_cqp_sds_cmd
;
2142 * i40iw_sc_ccq_destroy - destroy ccq during close
2143 * @ccq: ccq sc struct
2144 * @scratch: u64 saved to be used during cqp completion
2145 * @post_sq: flag for cqp db to ring
2147 static enum i40iw_status_code
i40iw_sc_ccq_destroy(struct i40iw_sc_cq
*ccq
,
2151 struct i40iw_sc_cqp
*cqp
;
2154 enum i40iw_status_code ret_code
= 0;
2155 u32 tail
, val
, error
;
2157 cqp
= ccq
->dev
->cqp
;
2158 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2160 return I40IW_ERR_RING_FULL
;
2161 set_64bit_val(wqe
, 0, ccq
->cq_uk
.cq_size
);
2162 set_64bit_val(wqe
, 8, RS_64_1(ccq
, 1));
2163 set_64bit_val(wqe
, 40, ccq
->shadow_area_pa
);
2165 header
= ccq
->cq_uk
.cq_id
|
2166 LS_64((ccq
->ceq_id_valid
? ccq
->ceq_id
: 0), I40IW_CQPSQ_CQ_CEQID
) |
2167 LS_64(I40IW_CQP_OP_DESTROY_CQ
, I40IW_CQPSQ_OPCODE
) |
2168 LS_64(ccq
->ceqe_mask
, I40IW_CQPSQ_CQ_ENCEQEMASK
) |
2169 LS_64(ccq
->ceq_id_valid
, I40IW_CQPSQ_CQ_CEQIDVALID
) |
2170 LS_64(ccq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
2171 LS_64(ccq
->cq_uk
.avoid_mem_cflct
, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT
) |
2172 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2174 i40iw_insert_wqe_hdr(wqe
, header
);
2176 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CCQ_DESTROY WQE",
2177 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2179 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
2181 return I40IW_ERR_CQP_COMPL_ERROR
;
2184 i40iw_sc_cqp_post_sq(cqp
);
2185 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, 1000);
2188 cqp
->process_cqp_sds
= i40iw_update_sds_noccq
;
2194 * i40iw_sc_cq_init - initialize completion q
2196 * @info: cq initialization info
2198 static enum i40iw_status_code
i40iw_sc_cq_init(struct i40iw_sc_cq
*cq
,
2199 struct i40iw_cq_init_info
*info
)
2201 u32 __iomem
*cqe_alloc_reg
= NULL
;
2202 enum i40iw_status_code ret_code
;
2206 pble_obj_cnt
= info
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
2208 if (info
->virtual_map
&& (info
->first_pm_pbl_idx
>= pble_obj_cnt
))
2209 return I40IW_ERR_INVALID_PBLE_INDEX
;
2211 cq
->cq_pa
= info
->cq_base_pa
;
2212 cq
->dev
= info
->dev
;
2213 cq
->ceq_id
= info
->ceq_id
;
2214 arm_offset
= (info
->dev
->is_pf
) ? I40E_PFPE_CQARM
: I40E_VFPE_CQARM1
;
2215 if (i40iw_get_hw_addr(cq
->dev
))
2216 cqe_alloc_reg
= (u32 __iomem
*)(i40iw_get_hw_addr(cq
->dev
) +
2218 info
->cq_uk_init_info
.cqe_alloc_reg
= cqe_alloc_reg
;
2219 ret_code
= i40iw_cq_uk_init(&cq
->cq_uk
, &info
->cq_uk_init_info
);
2222 cq
->virtual_map
= info
->virtual_map
;
2223 cq
->pbl_chunk_size
= info
->pbl_chunk_size
;
2224 cq
->ceqe_mask
= info
->ceqe_mask
;
2225 cq
->cq_type
= (info
->type
) ? info
->type
: I40IW_CQ_TYPE_IWARP
;
2227 cq
->shadow_area_pa
= info
->shadow_area_pa
;
2228 cq
->shadow_read_threshold
= info
->shadow_read_threshold
;
2230 cq
->ceq_id_valid
= info
->ceq_id_valid
;
2231 cq
->tph_en
= info
->tph_en
;
2232 cq
->tph_val
= info
->tph_val
;
2234 cq
->first_pm_pbl_idx
= info
->first_pm_pbl_idx
;
2240 * i40iw_sc_cq_create - create completion q
2242 * @scratch: u64 saved to be used during cqp completion
2243 * @check_overflow: flag for overflow check
2244 * @post_sq: flag for cqp db to ring
2246 static enum i40iw_status_code
i40iw_sc_cq_create(struct i40iw_sc_cq
*cq
,
2248 bool check_overflow
,
2252 struct i40iw_sc_cqp
*cqp
;
2255 if (cq
->cq_uk
.cq_id
> I40IW_MAX_CQID
)
2256 return I40IW_ERR_INVALID_CQ_ID
;
2258 if (cq
->ceq_id
> I40IW_MAX_CEQID
)
2259 return I40IW_ERR_INVALID_CEQ_ID
;
2262 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2264 return I40IW_ERR_RING_FULL
;
2266 set_64bit_val(wqe
, 0, cq
->cq_uk
.cq_size
);
2267 set_64bit_val(wqe
, 8, RS_64_1(cq
, 1));
2270 LS_64(cq
->shadow_read_threshold
, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD
));
2272 set_64bit_val(wqe
, 32, (cq
->virtual_map
? 0 : cq
->cq_pa
));
2274 set_64bit_val(wqe
, 40, cq
->shadow_area_pa
);
2275 set_64bit_val(wqe
, 48, (cq
->virtual_map
? cq
->first_pm_pbl_idx
: 0));
2276 set_64bit_val(wqe
, 56, LS_64(cq
->tph_val
, I40IW_CQPSQ_TPHVAL
));
2278 header
= cq
->cq_uk
.cq_id
|
2279 LS_64((cq
->ceq_id_valid
? cq
->ceq_id
: 0), I40IW_CQPSQ_CQ_CEQID
) |
2280 LS_64(I40IW_CQP_OP_CREATE_CQ
, I40IW_CQPSQ_OPCODE
) |
2281 LS_64(cq
->pbl_chunk_size
, I40IW_CQPSQ_CQ_LPBLSIZE
) |
2282 LS_64(check_overflow
, I40IW_CQPSQ_CQ_CHKOVERFLOW
) |
2283 LS_64(cq
->virtual_map
, I40IW_CQPSQ_CQ_VIRTMAP
) |
2284 LS_64(cq
->ceqe_mask
, I40IW_CQPSQ_CQ_ENCEQEMASK
) |
2285 LS_64(cq
->ceq_id_valid
, I40IW_CQPSQ_CQ_CEQIDVALID
) |
2286 LS_64(cq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
2287 LS_64(cq
->cq_uk
.avoid_mem_cflct
, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT
) |
2288 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2290 i40iw_insert_wqe_hdr(wqe
, header
);
2292 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CQ_CREATE WQE",
2293 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2296 i40iw_sc_cqp_post_sq(cqp
);
2301 * i40iw_sc_cq_destroy - destroy completion q
2303 * @scratch: u64 saved to be used during cqp completion
2304 * @post_sq: flag for cqp db to ring
2306 static enum i40iw_status_code
i40iw_sc_cq_destroy(struct i40iw_sc_cq
*cq
,
2310 struct i40iw_sc_cqp
*cqp
;
2315 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2317 return I40IW_ERR_RING_FULL
;
2318 set_64bit_val(wqe
, 0, cq
->cq_uk
.cq_size
);
2319 set_64bit_val(wqe
, 8, RS_64_1(cq
, 1));
2320 set_64bit_val(wqe
, 40, cq
->shadow_area_pa
);
2321 set_64bit_val(wqe
, 48, (cq
->virtual_map
? cq
->first_pm_pbl_idx
: 0));
2323 header
= cq
->cq_uk
.cq_id
|
2324 LS_64((cq
->ceq_id_valid
? cq
->ceq_id
: 0), I40IW_CQPSQ_CQ_CEQID
) |
2325 LS_64(I40IW_CQP_OP_DESTROY_CQ
, I40IW_CQPSQ_OPCODE
) |
2326 LS_64(cq
->pbl_chunk_size
, I40IW_CQPSQ_CQ_LPBLSIZE
) |
2327 LS_64(cq
->virtual_map
, I40IW_CQPSQ_CQ_VIRTMAP
) |
2328 LS_64(cq
->ceqe_mask
, I40IW_CQPSQ_CQ_ENCEQEMASK
) |
2329 LS_64(cq
->ceq_id_valid
, I40IW_CQPSQ_CQ_CEQIDVALID
) |
2330 LS_64(cq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
2331 LS_64(cq
->cq_uk
.avoid_mem_cflct
, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT
) |
2332 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2334 i40iw_insert_wqe_hdr(wqe
, header
);
2336 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CQ_DESTROY WQE",
2337 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2340 i40iw_sc_cqp_post_sq(cqp
);
2345 * i40iw_sc_cq_modify - modify a Completion Queue
2347 * @info: modification info struct
2349 * @post_sq: flag to post to sq
2351 static enum i40iw_status_code
i40iw_sc_cq_modify(struct i40iw_sc_cq
*cq
,
2352 struct i40iw_modify_cq_info
*info
,
2356 struct i40iw_sc_cqp
*cqp
;
2359 u32 cq_size
, ceq_id
, first_pm_pbl_idx
;
2361 bool virtual_map
, ceq_id_valid
, check_overflow
;
2364 if (info
->ceq_valid
&& (info
->ceq_id
> I40IW_MAX_CEQID
))
2365 return I40IW_ERR_INVALID_CEQ_ID
;
2367 pble_obj_cnt
= cq
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
2369 if (info
->cq_resize
&& info
->virtual_map
&&
2370 (info
->first_pm_pbl_idx
>= pble_obj_cnt
))
2371 return I40IW_ERR_INVALID_PBLE_INDEX
;
2374 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2376 return I40IW_ERR_RING_FULL
;
2378 cq
->pbl_list
= info
->pbl_list
;
2379 cq
->cq_pa
= info
->cq_pa
;
2380 cq
->first_pm_pbl_idx
= info
->first_pm_pbl_idx
;
2382 cq_size
= info
->cq_resize
? info
->cq_size
: cq
->cq_uk
.cq_size
;
2383 if (info
->ceq_change
) {
2384 ceq_id_valid
= true;
2385 ceq_id
= info
->ceq_id
;
2387 ceq_id_valid
= cq
->ceq_id_valid
;
2388 ceq_id
= ceq_id_valid
? cq
->ceq_id
: 0;
2390 virtual_map
= info
->cq_resize
? info
->virtual_map
: cq
->virtual_map
;
2391 first_pm_pbl_idx
= (info
->cq_resize
?
2392 (info
->virtual_map
? info
->first_pm_pbl_idx
: 0) :
2393 (cq
->virtual_map
? cq
->first_pm_pbl_idx
: 0));
2394 pbl_chunk_size
= (info
->cq_resize
?
2395 (info
->virtual_map
? info
->pbl_chunk_size
: 0) :
2396 (cq
->virtual_map
? cq
->pbl_chunk_size
: 0));
2397 check_overflow
= info
->check_overflow_change
? info
->check_overflow
:
2399 cq
->cq_uk
.cq_size
= cq_size
;
2400 cq
->ceq_id_valid
= ceq_id_valid
;
2401 cq
->ceq_id
= ceq_id
;
2402 cq
->virtual_map
= virtual_map
;
2403 cq
->first_pm_pbl_idx
= first_pm_pbl_idx
;
2404 cq
->pbl_chunk_size
= pbl_chunk_size
;
2405 cq
->check_overflow
= check_overflow
;
2407 set_64bit_val(wqe
, 0, cq_size
);
2408 set_64bit_val(wqe
, 8, RS_64_1(cq
, 1));
2409 set_64bit_val(wqe
, 16,
2410 LS_64(info
->shadow_read_threshold
, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD
));
2411 set_64bit_val(wqe
, 32, (cq
->virtual_map
? 0 : cq
->cq_pa
));
2412 set_64bit_val(wqe
, 40, cq
->shadow_area_pa
);
2413 set_64bit_val(wqe
, 48, (cq
->virtual_map
? first_pm_pbl_idx
: 0));
2414 set_64bit_val(wqe
, 56, LS_64(cq
->tph_val
, I40IW_CQPSQ_TPHVAL
));
2416 header
= cq
->cq_uk
.cq_id
|
2417 LS_64(ceq_id
, I40IW_CQPSQ_CQ_CEQID
) |
2418 LS_64(I40IW_CQP_OP_MODIFY_CQ
, I40IW_CQPSQ_OPCODE
) |
2419 LS_64(info
->cq_resize
, I40IW_CQPSQ_CQ_CQRESIZE
) |
2420 LS_64(pbl_chunk_size
, I40IW_CQPSQ_CQ_LPBLSIZE
) |
2421 LS_64(check_overflow
, I40IW_CQPSQ_CQ_CHKOVERFLOW
) |
2422 LS_64(virtual_map
, I40IW_CQPSQ_CQ_VIRTMAP
) |
2423 LS_64(cq
->ceqe_mask
, I40IW_CQPSQ_CQ_ENCEQEMASK
) |
2424 LS_64(ceq_id_valid
, I40IW_CQPSQ_CQ_CEQIDVALID
) |
2425 LS_64(cq
->tph_en
, I40IW_CQPSQ_TPHEN
) |
2426 LS_64(cq
->cq_uk
.avoid_mem_cflct
, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT
) |
2427 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2429 i40iw_insert_wqe_hdr(wqe
, header
);
2431 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "CQ_MODIFY WQE",
2432 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2435 i40iw_sc_cqp_post_sq(cqp
);
2440 * i40iw_sc_qp_init - initialize qp
2442 * @info: initialization qp info
2444 static enum i40iw_status_code
i40iw_sc_qp_init(struct i40iw_sc_qp
*qp
,
2445 struct i40iw_qp_init_info
*info
)
2447 u32 __iomem
*wqe_alloc_reg
= NULL
;
2448 enum i40iw_status_code ret_code
;
2453 qp
->dev
= info
->pd
->dev
;
2454 qp
->vsi
= info
->vsi
;
2455 qp
->sq_pa
= info
->sq_pa
;
2456 qp
->rq_pa
= info
->rq_pa
;
2457 qp
->hw_host_ctx_pa
= info
->host_ctx_pa
;
2458 qp
->q2_pa
= info
->q2_pa
;
2459 qp
->shadow_area_pa
= info
->shadow_area_pa
;
2461 qp
->q2_buf
= info
->q2
;
2463 qp
->hw_host_ctx
= info
->host_ctx
;
2464 offset
= (qp
->pd
->dev
->is_pf
) ? I40E_PFPE_WQEALLOC
: I40E_VFPE_WQEALLOC1
;
2465 if (i40iw_get_hw_addr(qp
->pd
->dev
))
2466 wqe_alloc_reg
= (u32 __iomem
*)(i40iw_get_hw_addr(qp
->pd
->dev
) +
2469 info
->qp_uk_init_info
.wqe_alloc_reg
= wqe_alloc_reg
;
2470 info
->qp_uk_init_info
.abi_ver
= qp
->pd
->abi_ver
;
2471 ret_code
= i40iw_qp_uk_init(&qp
->qp_uk
, &info
->qp_uk_init_info
);
2474 qp
->virtual_map
= info
->virtual_map
;
2476 pble_obj_cnt
= info
->pd
->dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
2478 if ((info
->virtual_map
&& (info
->sq_pa
>= pble_obj_cnt
)) ||
2479 (info
->virtual_map
&& (info
->rq_pa
>= pble_obj_cnt
)))
2480 return I40IW_ERR_INVALID_PBLE_INDEX
;
2482 qp
->llp_stream_handle
= (void *)(-1);
2483 qp
->qp_type
= (info
->type
) ? info
->type
: I40IW_QP_TYPE_IWARP
;
2485 qp
->hw_sq_size
= i40iw_get_encoded_wqe_size(qp
->qp_uk
.sq_ring
.size
,
2487 i40iw_debug(qp
->dev
, I40IW_DEBUG_WQE
, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2488 __func__
, qp
->hw_sq_size
, qp
->qp_uk
.sq_ring
.size
);
2490 switch (qp
->pd
->abi_ver
) {
2492 ret_code
= i40iw_fragcnt_to_wqesize_rq(qp
->qp_uk
.max_rq_frag_cnt
,
2497 case 5: /* fallthrough until next ABI version */
2499 if (qp
->qp_uk
.max_rq_frag_cnt
> I40IW_MAX_WQ_FRAGMENT_COUNT
)
2500 return I40IW_ERR_INVALID_FRAG_COUNT
;
2501 wqe_size
= I40IW_MAX_WQE_SIZE_RQ
;
2504 qp
->hw_rq_size
= i40iw_get_encoded_wqe_size(qp
->qp_uk
.rq_size
*
2505 (wqe_size
/ I40IW_QP_WQE_MIN_SIZE
), false);
2506 i40iw_debug(qp
->dev
, I40IW_DEBUG_WQE
,
2507 "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2508 __func__
, qp
->hw_rq_size
, qp
->qp_uk
.rq_size
, wqe_size
);
2509 qp
->sq_tph_val
= info
->sq_tph_val
;
2510 qp
->rq_tph_val
= info
->rq_tph_val
;
2511 qp
->sq_tph_en
= info
->sq_tph_en
;
2512 qp
->rq_tph_en
= info
->rq_tph_en
;
2513 qp
->rcv_tph_en
= info
->rcv_tph_en
;
2514 qp
->xmit_tph_en
= info
->xmit_tph_en
;
2515 qp
->qs_handle
= qp
->vsi
->qos
[qp
->user_pri
].qs_handle
;
2521 * i40iw_sc_qp_create - create qp
2523 * @info: qp create info
2524 * @scratch: u64 saved to be used during cqp completion
2525 * @post_sq: flag for cqp db to ring
2527 static enum i40iw_status_code
i40iw_sc_qp_create(
2528 struct i40iw_sc_qp
*qp
,
2529 struct i40iw_create_qp_info
*info
,
2533 struct i40iw_sc_cqp
*cqp
;
2537 if ((qp
->qp_uk
.qp_id
< I40IW_MIN_IW_QP_ID
) ||
2538 (qp
->qp_uk
.qp_id
> I40IW_MAX_IW_QP_ID
))
2539 return I40IW_ERR_INVALID_QP_ID
;
2541 cqp
= qp
->pd
->dev
->cqp
;
2542 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2544 return I40IW_ERR_RING_FULL
;
2546 set_64bit_val(wqe
, 16, qp
->hw_host_ctx_pa
);
2548 set_64bit_val(wqe
, 40, qp
->shadow_area_pa
);
2550 header
= qp
->qp_uk
.qp_id
|
2551 LS_64(I40IW_CQP_OP_CREATE_QP
, I40IW_CQPSQ_OPCODE
) |
2552 LS_64((info
->ord_valid
? 1 : 0), I40IW_CQPSQ_QP_ORDVALID
) |
2553 LS_64(info
->tcp_ctx_valid
, I40IW_CQPSQ_QP_TOECTXVALID
) |
2554 LS_64(qp
->qp_type
, I40IW_CQPSQ_QP_QPTYPE
) |
2555 LS_64(qp
->virtual_map
, I40IW_CQPSQ_QP_VQ
) |
2556 LS_64(info
->cq_num_valid
, I40IW_CQPSQ_QP_CQNUMVALID
) |
2557 LS_64(info
->arp_cache_idx_valid
, I40IW_CQPSQ_QP_ARPTABIDXVALID
) |
2558 LS_64(info
->next_iwarp_state
, I40IW_CQPSQ_QP_NEXTIWSTATE
) |
2559 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2561 i40iw_insert_wqe_hdr(wqe
, header
);
2562 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QP_CREATE WQE",
2563 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2566 i40iw_sc_cqp_post_sq(cqp
);
2571 * i40iw_sc_qp_modify - modify qp cqp wqe
2573 * @info: modify qp info
2574 * @scratch: u64 saved to be used during cqp completion
2575 * @post_sq: flag for cqp db to ring
2577 static enum i40iw_status_code
i40iw_sc_qp_modify(
2578 struct i40iw_sc_qp
*qp
,
2579 struct i40iw_modify_qp_info
*info
,
2584 struct i40iw_sc_cqp
*cqp
;
2586 u8 term_actions
= 0;
2589 cqp
= qp
->pd
->dev
->cqp
;
2590 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2592 return I40IW_ERR_RING_FULL
;
2593 if (info
->next_iwarp_state
== I40IW_QP_STATE_TERMINATE
) {
2594 if (info
->dont_send_fin
)
2595 term_actions
+= I40IWQP_TERM_SEND_TERM_ONLY
;
2596 if (info
->dont_send_term
)
2597 term_actions
+= I40IWQP_TERM_SEND_FIN_ONLY
;
2598 if ((term_actions
== I40IWQP_TERM_SEND_TERM_AND_FIN
) ||
2599 (term_actions
== I40IWQP_TERM_SEND_TERM_ONLY
))
2600 term_len
= info
->termlen
;
2605 LS_64(term_len
, I40IW_CQPSQ_QP_TERMLEN
));
2607 set_64bit_val(wqe
, 16, qp
->hw_host_ctx_pa
);
2608 set_64bit_val(wqe
, 40, qp
->shadow_area_pa
);
2610 header
= qp
->qp_uk
.qp_id
|
2611 LS_64(I40IW_CQP_OP_MODIFY_QP
, I40IW_CQPSQ_OPCODE
) |
2612 LS_64(info
->ord_valid
, I40IW_CQPSQ_QP_ORDVALID
) |
2613 LS_64(info
->tcp_ctx_valid
, I40IW_CQPSQ_QP_TOECTXVALID
) |
2614 LS_64(info
->cached_var_valid
, I40IW_CQPSQ_QP_CACHEDVARVALID
) |
2615 LS_64(qp
->virtual_map
, I40IW_CQPSQ_QP_VQ
) |
2616 LS_64(info
->cq_num_valid
, I40IW_CQPSQ_QP_CQNUMVALID
) |
2617 LS_64(info
->force_loopback
, I40IW_CQPSQ_QP_FORCELOOPBACK
) |
2618 LS_64(qp
->qp_type
, I40IW_CQPSQ_QP_QPTYPE
) |
2619 LS_64(info
->remove_hash_idx
, I40IW_CQPSQ_QP_REMOVEHASHENTRY
) |
2620 LS_64(term_actions
, I40IW_CQPSQ_QP_TERMACT
) |
2621 LS_64(info
->reset_tcp_conn
, I40IW_CQPSQ_QP_RESETCON
) |
2622 LS_64(info
->arp_cache_idx_valid
, I40IW_CQPSQ_QP_ARPTABIDXVALID
) |
2623 LS_64(info
->next_iwarp_state
, I40IW_CQPSQ_QP_NEXTIWSTATE
) |
2624 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2626 i40iw_insert_wqe_hdr(wqe
, header
);
2628 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QP_MODIFY WQE",
2629 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2632 i40iw_sc_cqp_post_sq(cqp
);
2637 * i40iw_sc_qp_destroy - cqp destroy qp
2639 * @scratch: u64 saved to be used during cqp completion
2640 * @remove_hash_idx: flag if to remove hash idx
2641 * @ignore_mw_bnd: memory window bind flag
2642 * @post_sq: flag for cqp db to ring
2644 static enum i40iw_status_code
i40iw_sc_qp_destroy(
2645 struct i40iw_sc_qp
*qp
,
2647 bool remove_hash_idx
,
2652 struct i40iw_sc_cqp
*cqp
;
2655 i40iw_qp_rem_qos(qp
);
2656 cqp
= qp
->pd
->dev
->cqp
;
2657 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2659 return I40IW_ERR_RING_FULL
;
2660 set_64bit_val(wqe
, 16, qp
->hw_host_ctx_pa
);
2661 set_64bit_val(wqe
, 40, qp
->shadow_area_pa
);
2663 header
= qp
->qp_uk
.qp_id
|
2664 LS_64(I40IW_CQP_OP_DESTROY_QP
, I40IW_CQPSQ_OPCODE
) |
2665 LS_64(qp
->qp_type
, I40IW_CQPSQ_QP_QPTYPE
) |
2666 LS_64(ignore_mw_bnd
, I40IW_CQPSQ_QP_IGNOREMWBOUND
) |
2667 LS_64(remove_hash_idx
, I40IW_CQPSQ_QP_REMOVEHASHENTRY
) |
2668 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2670 i40iw_insert_wqe_hdr(wqe
, header
);
2671 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QP_DESTROY WQE",
2672 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2675 i40iw_sc_cqp_post_sq(cqp
);
2680 * i40iw_sc_qp_flush_wqes - flush qp's wqe
2682 * @info: dlush information
2683 * @scratch: u64 saved to be used during cqp completion
2684 * @post_sq: flag for cqp db to ring
2686 static enum i40iw_status_code
i40iw_sc_qp_flush_wqes(
2687 struct i40iw_sc_qp
*qp
,
2688 struct i40iw_qp_flush_info
*info
,
2694 struct i40iw_sc_cqp
*cqp
;
2696 bool flush_sq
= false, flush_rq
= false;
2698 if (info
->rq
&& !qp
->flush_rq
)
2701 if (info
->sq
&& !qp
->flush_sq
)
2704 qp
->flush_sq
|= flush_sq
;
2705 qp
->flush_rq
|= flush_rq
;
2706 if (!flush_sq
&& !flush_rq
)
2709 cqp
= qp
->pd
->dev
->cqp
;
2710 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2712 return I40IW_ERR_RING_FULL
;
2713 if (info
->userflushcode
) {
2715 temp
|= LS_64(info
->rq_minor_code
, I40IW_CQPSQ_FWQE_RQMNERR
) |
2716 LS_64(info
->rq_major_code
, I40IW_CQPSQ_FWQE_RQMJERR
);
2719 temp
|= LS_64(info
->sq_minor_code
, I40IW_CQPSQ_FWQE_SQMNERR
) |
2720 LS_64(info
->sq_major_code
, I40IW_CQPSQ_FWQE_SQMJERR
);
2723 set_64bit_val(wqe
, 16, temp
);
2725 temp
= (info
->generate_ae
) ?
2726 info
->ae_code
| LS_64(info
->ae_source
, I40IW_CQPSQ_FWQE_AESOURCE
) : 0;
2728 set_64bit_val(wqe
, 8, temp
);
2730 header
= qp
->qp_uk
.qp_id
|
2731 LS_64(I40IW_CQP_OP_FLUSH_WQES
, I40IW_CQPSQ_OPCODE
) |
2732 LS_64(info
->generate_ae
, I40IW_CQPSQ_FWQE_GENERATE_AE
) |
2733 LS_64(info
->userflushcode
, I40IW_CQPSQ_FWQE_USERFLCODE
) |
2734 LS_64(flush_sq
, I40IW_CQPSQ_FWQE_FLUSHSQ
) |
2735 LS_64(flush_rq
, I40IW_CQPSQ_FWQE_FLUSHRQ
) |
2736 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2738 i40iw_insert_wqe_hdr(wqe
, header
);
2740 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "QP_FLUSH WQE",
2741 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2744 i40iw_sc_cqp_post_sq(cqp
);
2749 * i40iw_sc_gen_ae - generate AE, currently uses flush WQE CQP OP
2751 * @info: gen ae information
2752 * @scratch: u64 saved to be used during cqp completion
2753 * @post_sq: flag for cqp db to ring
2755 static enum i40iw_status_code
i40iw_sc_gen_ae(
2756 struct i40iw_sc_qp
*qp
,
2757 struct i40iw_gen_ae_info
*info
,
2763 struct i40iw_sc_cqp
*cqp
;
2766 cqp
= qp
->pd
->dev
->cqp
;
2767 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2769 return I40IW_ERR_RING_FULL
;
2771 temp
= info
->ae_code
|
2772 LS_64(info
->ae_source
, I40IW_CQPSQ_FWQE_AESOURCE
);
2774 set_64bit_val(wqe
, 8, temp
);
2776 header
= qp
->qp_uk
.qp_id
|
2777 LS_64(I40IW_CQP_OP_GEN_AE
, I40IW_CQPSQ_OPCODE
) |
2778 LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE
) |
2779 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2781 i40iw_insert_wqe_hdr(wqe
, header
);
2783 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "GEN_AE WQE",
2784 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2787 i40iw_sc_cqp_post_sq(cqp
);
2792 * i40iw_sc_qp_upload_context - upload qp's context
2793 * @dev: sc device struct
2794 * @info: upload context info ptr for return
2795 * @scratch: u64 saved to be used during cqp completion
2796 * @post_sq: flag for cqp db to ring
2798 static enum i40iw_status_code
i40iw_sc_qp_upload_context(
2799 struct i40iw_sc_dev
*dev
,
2800 struct i40iw_upload_context_info
*info
,
2805 struct i40iw_sc_cqp
*cqp
;
2809 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
2811 return I40IW_ERR_RING_FULL
;
2812 set_64bit_val(wqe
, 16, info
->buf_pa
);
2814 header
= LS_64(info
->qp_id
, I40IW_CQPSQ_UCTX_QPID
) |
2815 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT
, I40IW_CQPSQ_OPCODE
) |
2816 LS_64(info
->qp_type
, I40IW_CQPSQ_UCTX_QPTYPE
) |
2817 LS_64(info
->raw_format
, I40IW_CQPSQ_UCTX_RAWFORMAT
) |
2818 LS_64(info
->freeze_qp
, I40IW_CQPSQ_UCTX_FREEZEQP
) |
2819 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
2821 i40iw_insert_wqe_hdr(wqe
, header
);
2823 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "QP_UPLOAD_CTX WQE",
2824 wqe
, I40IW_CQP_WQE_SIZE
* 8);
2827 i40iw_sc_cqp_post_sq(cqp
);
2832 * i40iw_sc_qp_setctx - set qp's context
2834 * @qp_ctx: context ptr
2837 static enum i40iw_status_code
i40iw_sc_qp_setctx(
2838 struct i40iw_sc_qp
*qp
,
2840 struct i40iw_qp_host_ctx_info
*info
)
2842 struct i40iwarp_offload_info
*iw
;
2843 struct i40iw_tcp_offload_info
*tcp
;
2844 struct i40iw_sc_vsi
*vsi
;
2845 struct i40iw_sc_dev
*dev
;
2846 u64 qw0
, qw3
, qw7
= 0;
2848 iw
= info
->iwarp_info
;
2849 tcp
= info
->tcp_info
;
2852 if (info
->add_to_qoslist
) {
2853 qp
->user_pri
= info
->user_pri
;
2854 i40iw_qp_add_qos(qp
);
2855 i40iw_debug(qp
->dev
, I40IW_DEBUG_DCB
, "%s qp[%d] UP[%d] qset[%d]\n",
2856 __func__
, qp
->qp_uk
.qp_id
, qp
->user_pri
, qp
->qs_handle
);
2858 qw0
= LS_64(qp
->qp_uk
.rq_wqe_size
, I40IWQPC_RQWQESIZE
) |
2859 LS_64(info
->err_rq_idx_valid
, I40IWQPC_ERR_RQ_IDX_VALID
) |
2860 LS_64(qp
->rcv_tph_en
, I40IWQPC_RCVTPHEN
) |
2861 LS_64(qp
->xmit_tph_en
, I40IWQPC_XMITTPHEN
) |
2862 LS_64(qp
->rq_tph_en
, I40IWQPC_RQTPHEN
) |
2863 LS_64(qp
->sq_tph_en
, I40IWQPC_SQTPHEN
) |
2864 LS_64(info
->push_idx
, I40IWQPC_PPIDX
) |
2865 LS_64(info
->push_mode_en
, I40IWQPC_PMENA
);
2867 set_64bit_val(qp_ctx
, 8, qp
->sq_pa
);
2868 set_64bit_val(qp_ctx
, 16, qp
->rq_pa
);
2870 qw3
= LS_64(qp
->src_mac_addr_idx
, I40IWQPC_SRCMACADDRIDX
) |
2871 LS_64(qp
->hw_rq_size
, I40IWQPC_RQSIZE
) |
2872 LS_64(qp
->hw_sq_size
, I40IWQPC_SQSIZE
);
2874 set_64bit_val(qp_ctx
,
2876 LS_64(info
->err_rq_idx
, I40IWQPC_ERR_RQ_IDX
));
2878 set_64bit_val(qp_ctx
,
2880 LS_64(info
->send_cq_num
, I40IWQPC_TXCQNUM
) |
2881 LS_64(info
->rcv_cq_num
, I40IWQPC_RXCQNUM
));
2883 set_64bit_val(qp_ctx
,
2885 LS_64(info
->qp_compl_ctx
, I40IWQPC_QPCOMPCTX
));
2886 set_64bit_val(qp_ctx
,
2888 LS_64(qp
->sq_tph_val
, I40IWQPC_SQTPHVAL
) |
2889 LS_64(qp
->rq_tph_val
, I40IWQPC_RQTPHVAL
) |
2890 LS_64(qp
->qs_handle
, I40IWQPC_QSHANDLE
) |
2891 LS_64(vsi
->exception_lan_queue
, I40IWQPC_EXCEPTION_LAN_QUEUE
));
2893 if (info
->iwarp_info_valid
) {
2894 qw0
|= LS_64(iw
->ddp_ver
, I40IWQPC_DDP_VER
) |
2895 LS_64(iw
->rdmap_ver
, I40IWQPC_RDMAP_VER
);
2897 qw7
|= LS_64(iw
->pd_id
, I40IWQPC_PDIDX
);
2898 set_64bit_val(qp_ctx
,
2900 LS_64(qp
->q2_pa
, I40IWQPC_Q2ADDR
) |
2901 LS_64(vsi
->fcn_id
, I40IWQPC_STAT_INDEX
));
2902 set_64bit_val(qp_ctx
,
2904 LS_64(iw
->last_byte_sent
, I40IWQPC_LASTBYTESENT
));
2906 set_64bit_val(qp_ctx
,
2908 LS_64(iw
->ord_size
, I40IWQPC_ORDSIZE
) |
2909 LS_64(iw
->ird_size
, I40IWQPC_IRDSIZE
) |
2910 LS_64(iw
->wr_rdresp_en
, I40IWQPC_WRRDRSPOK
) |
2911 LS_64(iw
->rd_enable
, I40IWQPC_RDOK
) |
2912 LS_64(iw
->snd_mark_en
, I40IWQPC_SNDMARKERS
) |
2913 LS_64(iw
->bind_en
, I40IWQPC_BINDEN
) |
2914 LS_64(iw
->fast_reg_en
, I40IWQPC_FASTREGEN
) |
2915 LS_64(iw
->priv_mode_en
, I40IWQPC_PRIVEN
) |
2916 LS_64((((vsi
->stats_fcn_id_alloc
) &&
2917 (dev
->is_pf
) && (vsi
->fcn_id
>= I40IW_FIRST_NON_PF_STAT
)) ? 1 : 0),
2918 I40IWQPC_USESTATSINSTANCE
) |
2919 LS_64(1, I40IWQPC_IWARPMODE
) |
2920 LS_64(iw
->rcv_mark_en
, I40IWQPC_RCVMARKERS
) |
2921 LS_64(iw
->align_hdrs
, I40IWQPC_ALIGNHDRS
) |
2922 LS_64(iw
->rcv_no_mpa_crc
, I40IWQPC_RCVNOMPACRC
) |
2923 LS_64(iw
->rcv_mark_offset
, I40IWQPC_RCVMARKOFFSET
) |
2924 LS_64(iw
->snd_mark_offset
, I40IWQPC_SNDMARKOFFSET
));
2926 if (info
->tcp_info_valid
) {
2927 qw0
|= LS_64(tcp
->ipv4
, I40IWQPC_IPV4
) |
2928 LS_64(tcp
->no_nagle
, I40IWQPC_NONAGLE
) |
2929 LS_64(tcp
->insert_vlan_tag
, I40IWQPC_INSERTVLANTAG
) |
2930 LS_64(tcp
->time_stamp
, I40IWQPC_TIMESTAMP
) |
2931 LS_64(tcp
->cwnd_inc_limit
, I40IWQPC_LIMIT
) |
2932 LS_64(tcp
->drop_ooo_seg
, I40IWQPC_DROPOOOSEG
) |
2933 LS_64(tcp
->dup_ack_thresh
, I40IWQPC_DUPACK_THRESH
);
2935 qw3
|= LS_64(tcp
->ttl
, I40IWQPC_TTL
) |
2936 LS_64(tcp
->src_mac_addr_idx
, I40IWQPC_SRCMACADDRIDX
) |
2937 LS_64(tcp
->avoid_stretch_ack
, I40IWQPC_AVOIDSTRETCHACK
) |
2938 LS_64(tcp
->tos
, I40IWQPC_TOS
) |
2939 LS_64(tcp
->src_port
, I40IWQPC_SRCPORTNUM
) |
2940 LS_64(tcp
->dst_port
, I40IWQPC_DESTPORTNUM
);
2942 qp
->src_mac_addr_idx
= tcp
->src_mac_addr_idx
;
2943 set_64bit_val(qp_ctx
,
2945 LS_64(tcp
->dest_ip_addr2
, I40IWQPC_DESTIPADDR2
) |
2946 LS_64(tcp
->dest_ip_addr3
, I40IWQPC_DESTIPADDR3
));
2948 set_64bit_val(qp_ctx
,
2950 LS_64(tcp
->dest_ip_addr0
, I40IWQPC_DESTIPADDR0
) |
2951 LS_64(tcp
->dest_ip_addr1
, I40IWQPC_DESTIPADDR1
));
2953 set_64bit_val(qp_ctx
,
2955 LS_64(tcp
->snd_mss
, I40IWQPC_SNDMSS
) |
2956 LS_64(tcp
->vlan_tag
, I40IWQPC_VLANTAG
) |
2957 LS_64(tcp
->arp_idx
, I40IWQPC_ARPIDX
));
2959 qw7
|= LS_64(tcp
->flow_label
, I40IWQPC_FLOWLABEL
) |
2960 LS_64(tcp
->wscale
, I40IWQPC_WSCALE
) |
2961 LS_64(tcp
->ignore_tcp_opt
, I40IWQPC_IGNORE_TCP_OPT
) |
2962 LS_64(tcp
->ignore_tcp_uns_opt
, I40IWQPC_IGNORE_TCP_UNS_OPT
) |
2963 LS_64(tcp
->tcp_state
, I40IWQPC_TCPSTATE
) |
2964 LS_64(tcp
->rcv_wscale
, I40IWQPC_RCVSCALE
) |
2965 LS_64(tcp
->snd_wscale
, I40IWQPC_SNDSCALE
);
2967 set_64bit_val(qp_ctx
,
2969 LS_64(tcp
->time_stamp_recent
, I40IWQPC_TIMESTAMP_RECENT
) |
2970 LS_64(tcp
->time_stamp_age
, I40IWQPC_TIMESTAMP_AGE
));
2971 set_64bit_val(qp_ctx
,
2973 LS_64(tcp
->snd_nxt
, I40IWQPC_SNDNXT
) |
2974 LS_64(tcp
->snd_wnd
, I40IWQPC_SNDWND
));
2976 set_64bit_val(qp_ctx
,
2978 LS_64(tcp
->rcv_nxt
, I40IWQPC_RCVNXT
) |
2979 LS_64(tcp
->rcv_wnd
, I40IWQPC_RCVWND
));
2980 set_64bit_val(qp_ctx
,
2982 LS_64(tcp
->snd_max
, I40IWQPC_SNDMAX
) |
2983 LS_64(tcp
->snd_una
, I40IWQPC_SNDUNA
));
2984 set_64bit_val(qp_ctx
,
2986 LS_64(tcp
->srtt
, I40IWQPC_SRTT
) |
2987 LS_64(tcp
->rtt_var
, I40IWQPC_RTTVAR
));
2988 set_64bit_val(qp_ctx
,
2990 LS_64(tcp
->ss_thresh
, I40IWQPC_SSTHRESH
) |
2991 LS_64(tcp
->cwnd
, I40IWQPC_CWND
));
2992 set_64bit_val(qp_ctx
,
2994 LS_64(tcp
->snd_wl1
, I40IWQPC_SNDWL1
) |
2995 LS_64(tcp
->snd_wl2
, I40IWQPC_SNDWL2
));
2996 set_64bit_val(qp_ctx
,
2998 LS_64(tcp
->max_snd_window
, I40IWQPC_MAXSNDWND
) |
2999 LS_64(tcp
->rexmit_thresh
, I40IWQPC_REXMIT_THRESH
));
3000 set_64bit_val(qp_ctx
,
3002 LS_64(tcp
->local_ipaddr3
, I40IWQPC_LOCAL_IPADDR3
) |
3003 LS_64(tcp
->local_ipaddr2
, I40IWQPC_LOCAL_IPADDR2
));
3004 set_64bit_val(qp_ctx
,
3006 LS_64(tcp
->local_ipaddr1
, I40IWQPC_LOCAL_IPADDR1
) |
3007 LS_64(tcp
->local_ipaddr0
, I40IWQPC_LOCAL_IPADDR0
));
3010 set_64bit_val(qp_ctx
, 0, qw0
);
3011 set_64bit_val(qp_ctx
, 24, qw3
);
3012 set_64bit_val(qp_ctx
, 56, qw7
);
3014 i40iw_debug_buf(qp
->dev
, I40IW_DEBUG_WQE
, "QP_HOST)CTX WQE",
3015 qp_ctx
, I40IW_QP_CTX_SIZE
);
3020 * i40iw_sc_alloc_stag - mr stag alloc
3021 * @dev: sc device struct
3023 * @scratch: u64 saved to be used during cqp completion
3024 * @post_sq: flag for cqp db to ring
3026 static enum i40iw_status_code
i40iw_sc_alloc_stag(
3027 struct i40iw_sc_dev
*dev
,
3028 struct i40iw_allocate_stag_info
*info
,
3033 struct i40iw_sc_cqp
*cqp
;
3035 enum i40iw_page_size page_size
;
3037 page_size
= (info
->page_size
== 0x200000) ? I40IW_PAGE_SIZE_2M
: I40IW_PAGE_SIZE_4K
;
3039 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3041 return I40IW_ERR_RING_FULL
;
3044 LS_64(info
->pd_id
, I40IW_CQPSQ_STAG_PDID
) |
3045 LS_64(info
->total_len
, I40IW_CQPSQ_STAG_STAGLEN
));
3048 LS_64(info
->stag_idx
, I40IW_CQPSQ_STAG_IDX
));
3051 LS_64(info
->hmc_fcn_index
, I40IW_CQPSQ_STAG_HMCFNIDX
));
3053 header
= LS_64(I40IW_CQP_OP_ALLOC_STAG
, I40IW_CQPSQ_OPCODE
) |
3054 LS_64(1, I40IW_CQPSQ_STAG_MR
) |
3055 LS_64(info
->access_rights
, I40IW_CQPSQ_STAG_ARIGHTS
) |
3056 LS_64(info
->chunk_size
, I40IW_CQPSQ_STAG_LPBLSIZE
) |
3057 LS_64(page_size
, I40IW_CQPSQ_STAG_HPAGESIZE
) |
3058 LS_64(info
->remote_access
, I40IW_CQPSQ_STAG_REMACCENABLED
) |
3059 LS_64(info
->use_hmc_fcn_index
, I40IW_CQPSQ_STAG_USEHMCFNIDX
) |
3060 LS_64(info
->use_pf_rid
, I40IW_CQPSQ_STAG_USEPFRID
) |
3061 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3063 i40iw_insert_wqe_hdr(wqe
, header
);
3065 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "ALLOC_STAG WQE",
3066 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3069 i40iw_sc_cqp_post_sq(cqp
);
3074 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
3075 * @dev: sc device struct
3077 * @scratch: u64 saved to be used during cqp completion
3078 * @post_sq: flag for cqp db to ring
3080 static enum i40iw_status_code
i40iw_sc_mr_reg_non_shared(
3081 struct i40iw_sc_dev
*dev
,
3082 struct i40iw_reg_ns_stag_info
*info
,
3088 struct i40iw_sc_cqp
*cqp
;
3093 enum i40iw_page_size page_size
;
3095 page_size
= (info
->page_size
== 0x200000) ? I40IW_PAGE_SIZE_2M
: I40IW_PAGE_SIZE_4K
;
3096 if (info
->access_rights
& (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY
|
3097 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY
))
3098 remote_access
= true;
3100 remote_access
= false;
3102 pble_obj_cnt
= dev
->hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
;
3104 if (info
->chunk_size
&& (info
->first_pm_pbl_index
>= pble_obj_cnt
))
3105 return I40IW_ERR_INVALID_PBLE_INDEX
;
3108 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3110 return I40IW_ERR_RING_FULL
;
3112 temp
= (info
->addr_type
== I40IW_ADDR_TYPE_VA_BASED
) ? (uintptr_t)info
->va
: info
->fbo
;
3113 set_64bit_val(wqe
, 0, temp
);
3117 LS_64(info
->total_len
, I40IW_CQPSQ_STAG_STAGLEN
) |
3118 LS_64(info
->pd_id
, I40IW_CQPSQ_STAG_PDID
));
3122 LS_64(info
->stag_key
, I40IW_CQPSQ_STAG_KEY
) |
3123 LS_64(info
->stag_idx
, I40IW_CQPSQ_STAG_IDX
));
3124 if (!info
->chunk_size
) {
3125 set_64bit_val(wqe
, 32, info
->reg_addr_pa
);
3126 set_64bit_val(wqe
, 48, 0);
3128 set_64bit_val(wqe
, 32, 0);
3129 set_64bit_val(wqe
, 48, info
->first_pm_pbl_index
);
3131 set_64bit_val(wqe
, 40, info
->hmc_fcn_index
);
3132 set_64bit_val(wqe
, 56, 0);
3134 addr_type
= (info
->addr_type
== I40IW_ADDR_TYPE_VA_BASED
) ? 1 : 0;
3135 header
= LS_64(I40IW_CQP_OP_REG_MR
, I40IW_CQPSQ_OPCODE
) |
3136 LS_64(1, I40IW_CQPSQ_STAG_MR
) |
3137 LS_64(info
->chunk_size
, I40IW_CQPSQ_STAG_LPBLSIZE
) |
3138 LS_64(page_size
, I40IW_CQPSQ_STAG_HPAGESIZE
) |
3139 LS_64(info
->access_rights
, I40IW_CQPSQ_STAG_ARIGHTS
) |
3140 LS_64(remote_access
, I40IW_CQPSQ_STAG_REMACCENABLED
) |
3141 LS_64(addr_type
, I40IW_CQPSQ_STAG_VABASEDTO
) |
3142 LS_64(info
->use_hmc_fcn_index
, I40IW_CQPSQ_STAG_USEHMCFNIDX
) |
3143 LS_64(info
->use_pf_rid
, I40IW_CQPSQ_STAG_USEPFRID
) |
3144 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3146 i40iw_insert_wqe_hdr(wqe
, header
);
3148 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "MR_REG_NS WQE",
3149 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3152 i40iw_sc_cqp_post_sq(cqp
);
3157 * i40iw_sc_mr_reg_shared - registered shared memory region
3158 * @dev: sc device struct
3159 * @info: info for shared memory registeration
3160 * @scratch: u64 saved to be used during cqp completion
3161 * @post_sq: flag for cqp db to ring
3163 static enum i40iw_status_code
i40iw_sc_mr_reg_shared(
3164 struct i40iw_sc_dev
*dev
,
3165 struct i40iw_register_shared_stag
*info
,
3170 struct i40iw_sc_cqp
*cqp
;
3171 u64 temp
, va64
, fbo
, header
;
3176 if (info
->access_rights
& (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY
|
3177 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY
))
3178 remote_access
= true;
3180 remote_access
= false;
3182 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3184 return I40IW_ERR_RING_FULL
;
3185 va64
= (uintptr_t)(info
->va
);
3186 va32
= (u32
)(va64
& 0x00000000FFFFFFFF);
3187 fbo
= (u64
)(va32
& (4096 - 1));
3191 (info
->addr_type
== I40IW_ADDR_TYPE_VA_BASED
? (uintptr_t)info
->va
: fbo
));
3195 LS_64(info
->pd_id
, I40IW_CQPSQ_STAG_PDID
));
3196 temp
= LS_64(info
->new_stag_key
, I40IW_CQPSQ_STAG_KEY
) |
3197 LS_64(info
->new_stag_idx
, I40IW_CQPSQ_STAG_IDX
) |
3198 LS_64(info
->parent_stag_idx
, I40IW_CQPSQ_STAG_PARENTSTAGIDX
);
3199 set_64bit_val(wqe
, 16, temp
);
3201 addr_type
= (info
->addr_type
== I40IW_ADDR_TYPE_VA_BASED
) ? 1 : 0;
3202 header
= LS_64(I40IW_CQP_OP_REG_SMR
, I40IW_CQPSQ_OPCODE
) |
3203 LS_64(1, I40IW_CQPSQ_STAG_MR
) |
3204 LS_64(info
->access_rights
, I40IW_CQPSQ_STAG_ARIGHTS
) |
3205 LS_64(remote_access
, I40IW_CQPSQ_STAG_REMACCENABLED
) |
3206 LS_64(addr_type
, I40IW_CQPSQ_STAG_VABASEDTO
) |
3207 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3209 i40iw_insert_wqe_hdr(wqe
, header
);
3211 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "MR_REG_SHARED WQE",
3212 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3215 i40iw_sc_cqp_post_sq(cqp
);
3220 * i40iw_sc_dealloc_stag - deallocate stag
3221 * @dev: sc device struct
3222 * @info: dealloc stag info
3223 * @scratch: u64 saved to be used during cqp completion
3224 * @post_sq: flag for cqp db to ring
3226 static enum i40iw_status_code
i40iw_sc_dealloc_stag(
3227 struct i40iw_sc_dev
*dev
,
3228 struct i40iw_dealloc_stag_info
*info
,
3234 struct i40iw_sc_cqp
*cqp
;
3237 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3239 return I40IW_ERR_RING_FULL
;
3242 LS_64(info
->pd_id
, I40IW_CQPSQ_STAG_PDID
));
3245 LS_64(info
->stag_idx
, I40IW_CQPSQ_STAG_IDX
));
3247 header
= LS_64(I40IW_CQP_OP_DEALLOC_STAG
, I40IW_CQPSQ_OPCODE
) |
3248 LS_64(info
->mr
, I40IW_CQPSQ_STAG_MR
) |
3249 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3251 i40iw_insert_wqe_hdr(wqe
, header
);
3253 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "DEALLOC_STAG WQE",
3254 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3257 i40iw_sc_cqp_post_sq(cqp
);
3262 * i40iw_sc_query_stag - query hardware for stag
3263 * @dev: sc device struct
3264 * @scratch: u64 saved to be used during cqp completion
3265 * @stag_index: stag index for query
3266 * @post_sq: flag for cqp db to ring
3268 static enum i40iw_status_code
i40iw_sc_query_stag(struct i40iw_sc_dev
*dev
,
3275 struct i40iw_sc_cqp
*cqp
;
3278 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3280 return I40IW_ERR_RING_FULL
;
3283 LS_64(stag_index
, I40IW_CQPSQ_QUERYSTAG_IDX
));
3285 header
= LS_64(I40IW_CQP_OP_QUERY_STAG
, I40IW_CQPSQ_OPCODE
) |
3286 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3288 i40iw_insert_wqe_hdr(wqe
, header
);
3290 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "QUERY_STAG WQE",
3291 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3294 i40iw_sc_cqp_post_sq(cqp
);
3299 * i40iw_sc_mw_alloc - mw allocate
3300 * @dev: sc device struct
3301 * @scratch: u64 saved to be used during cqp completion
3302 * @mw_stag_index:stag index
3303 * @pd_id: pd is for this mw
3304 * @post_sq: flag for cqp db to ring
3306 static enum i40iw_status_code
i40iw_sc_mw_alloc(
3307 struct i40iw_sc_dev
*dev
,
3314 struct i40iw_sc_cqp
*cqp
;
3318 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3320 return I40IW_ERR_RING_FULL
;
3321 set_64bit_val(wqe
, 8, LS_64(pd_id
, I40IW_CQPSQ_STAG_PDID
));
3324 LS_64(mw_stag_index
, I40IW_CQPSQ_STAG_IDX
));
3326 header
= LS_64(I40IW_CQP_OP_ALLOC_STAG
, I40IW_CQPSQ_OPCODE
) |
3327 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3329 i40iw_insert_wqe_hdr(wqe
, header
);
3331 i40iw_debug_buf(dev
, I40IW_DEBUG_WQE
, "MW_ALLOC WQE",
3332 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3335 i40iw_sc_cqp_post_sq(cqp
);
3340 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3342 * @info: fast mr info
3343 * @post_sq: flag for cqp db to ring
3345 enum i40iw_status_code
i40iw_sc_mr_fast_register(
3346 struct i40iw_sc_qp
*qp
,
3347 struct i40iw_fast_reg_stag_info
*info
,
3353 enum i40iw_page_size page_size
;
3355 page_size
= (info
->page_size
== 0x200000) ? I40IW_PAGE_SIZE_2M
: I40IW_PAGE_SIZE_4K
;
3356 wqe
= i40iw_qp_get_next_send_wqe(&qp
->qp_uk
, &wqe_idx
, I40IW_QP_WQE_MIN_SIZE
,
3359 return I40IW_ERR_QP_TOOMANY_WRS_POSTED
;
3361 i40iw_debug(qp
->dev
, I40IW_DEBUG_MR
, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3362 __func__
, info
->wr_id
, wqe_idx
,
3363 &qp
->qp_uk
.sq_wrtrk_array
[wqe_idx
].wrid
);
3364 temp
= (info
->addr_type
== I40IW_ADDR_TYPE_VA_BASED
) ? (uintptr_t)info
->va
: info
->fbo
;
3365 set_64bit_val(wqe
, 0, temp
);
3367 temp
= RS_64(info
->first_pm_pbl_index
>> 16, I40IWQPSQ_FIRSTPMPBLIDXHI
);
3370 LS_64(temp
, I40IWQPSQ_FIRSTPMPBLIDXHI
) |
3371 LS_64(info
->reg_addr_pa
>> I40IWQPSQ_PBLADDR_SHIFT
, I40IWQPSQ_PBLADDR
));
3376 LS_64(info
->first_pm_pbl_index
, I40IWQPSQ_FIRSTPMPBLIDXLO
));
3378 header
= LS_64(info
->stag_key
, I40IWQPSQ_STAGKEY
) |
3379 LS_64(info
->stag_idx
, I40IWQPSQ_STAGINDEX
) |
3380 LS_64(I40IWQP_OP_FAST_REGISTER
, I40IWQPSQ_OPCODE
) |
3381 LS_64(info
->chunk_size
, I40IWQPSQ_LPBLSIZE
) |
3382 LS_64(page_size
, I40IWQPSQ_HPAGESIZE
) |
3383 LS_64(info
->access_rights
, I40IWQPSQ_STAGRIGHTS
) |
3384 LS_64(info
->addr_type
, I40IWQPSQ_VABASEDTO
) |
3385 LS_64(info
->read_fence
, I40IWQPSQ_READFENCE
) |
3386 LS_64(info
->local_fence
, I40IWQPSQ_LOCALFENCE
) |
3387 LS_64(info
->signaled
, I40IWQPSQ_SIGCOMPL
) |
3388 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3390 i40iw_insert_wqe_hdr(wqe
, header
);
3392 i40iw_debug_buf(qp
->dev
, I40IW_DEBUG_WQE
, "FAST_REG WQE",
3393 wqe
, I40IW_QP_WQE_MIN_SIZE
);
3396 i40iw_qp_post_wr(&qp
->qp_uk
);
3401 * i40iw_sc_send_lsmm - send last streaming mode message
3403 * @lsmm_buf: buffer with lsmm message
3404 * @size: size of lsmm buffer
3405 * @stag: stag of lsmm buffer
3407 static void i40iw_sc_send_lsmm(struct i40iw_sc_qp
*qp
,
3414 struct i40iw_qp_uk
*qp_uk
;
3417 wqe
= qp_uk
->sq_base
->elem
;
3419 set_64bit_val(wqe
, 0, (uintptr_t)lsmm_buf
);
3421 set_64bit_val(wqe
, 8, (size
| LS_64(stag
, I40IWQPSQ_FRAG_STAG
)));
3423 set_64bit_val(wqe
, 16, 0);
3425 header
= LS_64(I40IWQP_OP_RDMA_SEND
, I40IWQPSQ_OPCODE
) |
3426 LS_64(1, I40IWQPSQ_STREAMMODE
) |
3427 LS_64(1, I40IWQPSQ_WAITFORRCVPDU
) |
3428 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3430 i40iw_insert_wqe_hdr(wqe
, header
);
3432 i40iw_debug_buf(qp
->dev
, I40IW_DEBUG_QP
, "SEND_LSMM WQE",
3433 wqe
, I40IW_QP_WQE_MIN_SIZE
);
3437 * i40iw_sc_send_lsmm_nostag - for privilege qp
3439 * @lsmm_buf: buffer with lsmm message
3440 * @size: size of lsmm buffer
3442 static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp
*qp
,
3448 struct i40iw_qp_uk
*qp_uk
;
3451 wqe
= qp_uk
->sq_base
->elem
;
3453 set_64bit_val(wqe
, 0, (uintptr_t)lsmm_buf
);
3455 set_64bit_val(wqe
, 8, size
);
3457 set_64bit_val(wqe
, 16, 0);
3459 header
= LS_64(I40IWQP_OP_RDMA_SEND
, I40IWQPSQ_OPCODE
) |
3460 LS_64(1, I40IWQPSQ_STREAMMODE
) |
3461 LS_64(1, I40IWQPSQ_WAITFORRCVPDU
) |
3462 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3464 i40iw_insert_wqe_hdr(wqe
, header
);
3466 i40iw_debug_buf(qp
->dev
, I40IW_DEBUG_WQE
, "SEND_LSMM_NOSTAG WQE",
3467 wqe
, I40IW_QP_WQE_MIN_SIZE
);
3471 * i40iw_sc_send_rtt - send last read0 or write0
3473 * @read: Do read0 or write0
3475 static void i40iw_sc_send_rtt(struct i40iw_sc_qp
*qp
, bool read
)
3479 struct i40iw_qp_uk
*qp_uk
;
3482 wqe
= qp_uk
->sq_base
->elem
;
3484 set_64bit_val(wqe
, 0, 0);
3485 set_64bit_val(wqe
, 8, 0);
3486 set_64bit_val(wqe
, 16, 0);
3488 header
= LS_64(0x1234, I40IWQPSQ_REMSTAG
) |
3489 LS_64(I40IWQP_OP_RDMA_READ
, I40IWQPSQ_OPCODE
) |
3490 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3491 set_64bit_val(wqe
, 8, ((u64
)0xabcd << 32));
3493 header
= LS_64(I40IWQP_OP_RDMA_WRITE
, I40IWQPSQ_OPCODE
) |
3494 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3497 i40iw_insert_wqe_hdr(wqe
, header
);
3499 i40iw_debug_buf(qp
->dev
, I40IW_DEBUG_WQE
, "RTR WQE",
3500 wqe
, I40IW_QP_WQE_MIN_SIZE
);
3504 * i40iw_sc_post_wqe0 - send wqe with opcode
3506 * @opcode: opcode to use for wqe0
3508 static enum i40iw_status_code
i40iw_sc_post_wqe0(struct i40iw_sc_qp
*qp
, u8 opcode
)
3512 struct i40iw_qp_uk
*qp_uk
;
3515 wqe
= qp_uk
->sq_base
->elem
;
3518 return I40IW_ERR_QP_TOOMANY_WRS_POSTED
;
3520 case I40IWQP_OP_NOP
:
3521 set_64bit_val(wqe
, 0, 0);
3522 set_64bit_val(wqe
, 8, 0);
3523 set_64bit_val(wqe
, 16, 0);
3524 header
= LS_64(I40IWQP_OP_NOP
, I40IWQPSQ_OPCODE
) |
3525 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
);
3527 i40iw_insert_wqe_hdr(wqe
, header
);
3529 case I40IWQP_OP_RDMA_SEND
:
3530 set_64bit_val(wqe
, 0, 0);
3531 set_64bit_val(wqe
, 8, 0);
3532 set_64bit_val(wqe
, 16, 0);
3533 header
= LS_64(I40IWQP_OP_RDMA_SEND
, I40IWQPSQ_OPCODE
) |
3534 LS_64(qp
->qp_uk
.swqe_polarity
, I40IWQPSQ_VALID
) |
3535 LS_64(1, I40IWQPSQ_STREAMMODE
) |
3536 LS_64(1, I40IWQPSQ_WAITFORRCVPDU
);
3538 i40iw_insert_wqe_hdr(wqe
, header
);
3541 i40iw_debug(qp
->dev
, I40IW_DEBUG_QP
, "%s: Invalid WQE zero opcode\n",
3549 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3550 * @dev : ptr to i40iw_dev struct
3551 * @hmc_fn_id: hmc function id
3553 enum i40iw_status_code
i40iw_sc_init_iw_hmc(struct i40iw_sc_dev
*dev
, u8 hmc_fn_id
)
3555 struct i40iw_hmc_info
*hmc_info
;
3556 struct i40iw_dma_mem query_fpm_mem
;
3557 struct i40iw_virt_mem virt_mem
;
3558 struct i40iw_vfdev
*vf_dev
= NULL
;
3560 enum i40iw_status_code ret_code
= 0;
3561 bool poll_registers
= true;
3565 if (hmc_fn_id
>= I40IW_MAX_VF_FPM_ID
||
3566 (dev
->hmc_fn_id
!= hmc_fn_id
&& hmc_fn_id
< I40IW_FIRST_VF_FPM_ID
))
3567 return I40IW_ERR_INVALID_HMCFN_ID
;
3569 i40iw_debug(dev
, I40IW_DEBUG_HMC
, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id
,
3571 if (hmc_fn_id
== dev
->hmc_fn_id
) {
3572 hmc_info
= dev
->hmc_info
;
3573 query_fpm_mem
.pa
= dev
->fpm_query_buf_pa
;
3574 query_fpm_mem
.va
= dev
->fpm_query_buf
;
3576 vf_dev
= i40iw_vfdev_from_fpm(dev
, hmc_fn_id
);
3578 return I40IW_ERR_INVALID_VF_ID
;
3580 hmc_info
= &vf_dev
->hmc_info
;
3581 iw_vf_idx
= vf_dev
->iw_vf_idx
;
3582 i40iw_debug(dev
, I40IW_DEBUG_HMC
, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev
,
3583 hmc_info
, hmc_info
->hmc_obj
);
3584 if (!vf_dev
->fpm_query_buf
) {
3585 if (!dev
->vf_fpm_query_buf
[iw_vf_idx
].va
) {
3586 ret_code
= i40iw_alloc_query_fpm_buf(dev
,
3587 &dev
->vf_fpm_query_buf
[iw_vf_idx
]);
3591 vf_dev
->fpm_query_buf
= dev
->vf_fpm_query_buf
[iw_vf_idx
].va
;
3592 vf_dev
->fpm_query_buf_pa
= dev
->vf_fpm_query_buf
[iw_vf_idx
].pa
;
3594 query_fpm_mem
.pa
= vf_dev
->fpm_query_buf_pa
;
3595 query_fpm_mem
.va
= vf_dev
->fpm_query_buf
;
3597 * It is HARDWARE specific:
3598 * this call is done by PF for VF and
3599 * i40iw_sc_query_fpm_values needs ccq poll
3600 * because PF ccq is already created.
3602 poll_registers
= false;
3605 hmc_info
->hmc_fn_id
= hmc_fn_id
;
3607 if (hmc_fn_id
!= dev
->hmc_fn_id
) {
3609 i40iw_cqp_query_fpm_values_cmd(dev
, &query_fpm_mem
, hmc_fn_id
);
3611 wait_type
= poll_registers
? (u8
)I40IW_CQP_WAIT_POLL_REGS
:
3612 (u8
)I40IW_CQP_WAIT_POLL_CQ
;
3614 ret_code
= i40iw_sc_query_fpm_values(
3617 hmc_info
->hmc_fn_id
,
3625 /* parse the fpm_query_buf and fill hmc obj info */
3627 i40iw_sc_parse_fpm_query_buf((u64
*)query_fpm_mem
.va
,
3629 &dev
->hmc_fpm_misc
);
3632 i40iw_debug_buf(dev
, I40IW_DEBUG_HMC
, "QUERY FPM BUFFER",
3633 query_fpm_mem
.va
, I40IW_QUERY_FPM_BUF_SIZE
);
3635 if (hmc_fn_id
!= dev
->hmc_fn_id
) {
3636 i40iw_cqp_commit_fpm_values_cmd(dev
, &query_fpm_mem
, hmc_fn_id
);
3638 /* parse the fpm_commit_buf and fill hmc obj info */
3639 i40iw_sc_parse_fpm_commit_buf((u64
*)query_fpm_mem
.va
, hmc_info
->hmc_obj
, &hmc_info
->sd_table
.sd_cnt
);
3640 mem_size
= sizeof(struct i40iw_hmc_sd_entry
) *
3641 (hmc_info
->sd_table
.sd_cnt
+ hmc_info
->first_sd_index
);
3642 ret_code
= i40iw_allocate_virt_mem(dev
->hw
, &virt_mem
, mem_size
);
3645 hmc_info
->sd_table
.sd_entry
= virt_mem
.va
;
3652 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3653 * populates fpm base address in hmc_info
3654 * @dev : ptr to i40iw_dev struct
3655 * @hmc_fn_id: hmc function id
3657 static enum i40iw_status_code
i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev
*dev
,
3660 struct i40iw_hmc_info
*hmc_info
;
3661 struct i40iw_hmc_obj_info
*obj_info
;
3663 struct i40iw_dma_mem commit_fpm_mem
;
3665 enum i40iw_status_code ret_code
= 0;
3666 bool poll_registers
= true;
3669 if (hmc_fn_id
>= I40IW_MAX_VF_FPM_ID
||
3670 (dev
->hmc_fn_id
!= hmc_fn_id
&& hmc_fn_id
< I40IW_FIRST_VF_FPM_ID
))
3671 return I40IW_ERR_INVALID_HMCFN_ID
;
3673 if (hmc_fn_id
== dev
->hmc_fn_id
) {
3674 hmc_info
= dev
->hmc_info
;
3676 hmc_info
= i40iw_vf_hmcinfo_from_fpm(dev
, hmc_fn_id
);
3677 poll_registers
= false;
3680 return I40IW_ERR_BAD_PTR
;
3682 obj_info
= hmc_info
->hmc_obj
;
3683 buf
= dev
->fpm_commit_buf
;
3685 /* copy cnt values in commit buf */
3686 for (i
= I40IW_HMC_IW_QP
, j
= 0; i
<= I40IW_HMC_IW_PBLE
;
3688 set_64bit_val(buf
, j
, (u64
)obj_info
[i
].cnt
);
3690 set_64bit_val(buf
, 40, 0); /* APBVT rsvd */
3692 commit_fpm_mem
.pa
= dev
->fpm_commit_buf_pa
;
3693 commit_fpm_mem
.va
= dev
->fpm_commit_buf
;
3694 wait_type
= poll_registers
? (u8
)I40IW_CQP_WAIT_POLL_REGS
:
3695 (u8
)I40IW_CQP_WAIT_POLL_CQ
;
3696 ret_code
= i40iw_sc_commit_fpm_values(
3699 hmc_info
->hmc_fn_id
,
3704 /* parse the fpm_commit_buf and fill hmc obj info */
3706 ret_code
= i40iw_sc_parse_fpm_commit_buf(dev
->fpm_commit_buf
,
3708 &hmc_info
->sd_table
.sd_cnt
);
3710 i40iw_debug_buf(dev
, I40IW_DEBUG_HMC
, "COMMIT FPM BUFFER",
3711 commit_fpm_mem
.va
, I40IW_COMMIT_FPM_BUF_SIZE
);
3717 * cqp_sds_wqe_fill - fill cqp wqe doe sd
3718 * @cqp: struct for cqp hw
3719 * @info; sd info for wqe
3720 * @scratch: u64 saved to be used during cqp completion
3722 static enum i40iw_status_code
cqp_sds_wqe_fill(struct i40iw_sc_cqp
*cqp
,
3723 struct i40iw_update_sds_info
*info
,
3729 int mem_entries
, wqe_entries
;
3730 struct i40iw_dma_mem
*sdbuf
= &cqp
->sdbuf
;
3734 wqe
= i40iw_sc_cqp_get_next_send_wqe_idx(cqp
, scratch
, &wqe_idx
);
3736 return I40IW_ERR_RING_FULL
;
3738 I40IW_CQP_INIT_WQE(wqe
);
3739 wqe_entries
= (info
->cnt
> 3) ? 3 : info
->cnt
;
3740 mem_entries
= info
->cnt
- wqe_entries
;
3742 header
= LS_64(I40IW_CQP_OP_UPDATE_PE_SDS
, I40IW_CQPSQ_OPCODE
) |
3743 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
) |
3744 LS_64(mem_entries
, I40IW_CQPSQ_UPESD_ENTRY_COUNT
);
3747 offset
= wqe_idx
* I40IW_UPDATE_SD_BUF_SIZE
;
3748 memcpy((char *)sdbuf
->va
+ offset
, &info
->entry
[3],
3750 data
= (u64
)sdbuf
->pa
+ offset
;
3754 data
|= LS_64(info
->hmc_fn_id
, I40IW_CQPSQ_UPESD_HMCFNID
);
3756 set_64bit_val(wqe
, 16, data
);
3758 switch (wqe_entries
) {
3760 set_64bit_val(wqe
, 48,
3761 (LS_64(info
->entry
[2].cmd
, I40IW_CQPSQ_UPESD_SDCMD
) |
3762 LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID
)));
3764 set_64bit_val(wqe
, 56, info
->entry
[2].data
);
3767 set_64bit_val(wqe
, 32,
3768 (LS_64(info
->entry
[1].cmd
, I40IW_CQPSQ_UPESD_SDCMD
) |
3769 LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID
)));
3771 set_64bit_val(wqe
, 40, info
->entry
[1].data
);
3774 set_64bit_val(wqe
, 0,
3775 LS_64(info
->entry
[0].cmd
, I40IW_CQPSQ_UPESD_SDCMD
));
3777 set_64bit_val(wqe
, 8, info
->entry
[0].data
);
3783 i40iw_insert_wqe_hdr(wqe
, header
);
3785 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "UPDATE_PE_SDS WQE",
3786 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3791 * i40iw_update_pe_sds - cqp wqe for sd
3792 * @dev: ptr to i40iw_dev struct
3793 * @info: sd info for sd's
3794 * @scratch: u64 saved to be used during cqp completion
3796 static enum i40iw_status_code
i40iw_update_pe_sds(struct i40iw_sc_dev
*dev
,
3797 struct i40iw_update_sds_info
*info
,
3800 struct i40iw_sc_cqp
*cqp
= dev
->cqp
;
3801 enum i40iw_status_code ret_code
;
3803 ret_code
= cqp_sds_wqe_fill(cqp
, info
, scratch
);
3805 i40iw_sc_cqp_post_sq(cqp
);
3811 * i40iw_update_sds_noccq - update sd before ccq created
3812 * @dev: sc device struct
3813 * @info: sd info for sd's
3815 enum i40iw_status_code
i40iw_update_sds_noccq(struct i40iw_sc_dev
*dev
,
3816 struct i40iw_update_sds_info
*info
)
3818 u32 error
, val
, tail
;
3819 struct i40iw_sc_cqp
*cqp
= dev
->cqp
;
3820 enum i40iw_status_code ret_code
;
3822 ret_code
= cqp_sds_wqe_fill(cqp
, info
, 0);
3825 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
3827 return I40IW_ERR_CQP_COMPL_ERROR
;
3829 i40iw_sc_cqp_post_sq(cqp
);
3830 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, I40IW_DONE_COUNT
);
3836 * i40iw_sc_suspend_qp - suspend qp for param change
3837 * @cqp: struct for cqp hw
3839 * @scratch: u64 saved to be used during cqp completion
3841 enum i40iw_status_code
i40iw_sc_suspend_qp(struct i40iw_sc_cqp
*cqp
,
3842 struct i40iw_sc_qp
*qp
,
3848 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3850 return I40IW_ERR_RING_FULL
;
3851 header
= LS_64(qp
->qp_uk
.qp_id
, I40IW_CQPSQ_SUSPENDQP_QPID
) |
3852 LS_64(I40IW_CQP_OP_SUSPEND_QP
, I40IW_CQPSQ_OPCODE
) |
3853 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3855 i40iw_insert_wqe_hdr(wqe
, header
);
3857 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "SUSPEND_QP WQE",
3858 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3860 i40iw_sc_cqp_post_sq(cqp
);
3865 * i40iw_sc_resume_qp - resume qp after suspend
3866 * @cqp: struct for cqp hw
3868 * @scratch: u64 saved to be used during cqp completion
3870 enum i40iw_status_code
i40iw_sc_resume_qp(struct i40iw_sc_cqp
*cqp
,
3871 struct i40iw_sc_qp
*qp
,
3877 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3879 return I40IW_ERR_RING_FULL
;
3882 LS_64(qp
->qs_handle
, I40IW_CQPSQ_RESUMEQP_QSHANDLE
));
3884 header
= LS_64(qp
->qp_uk
.qp_id
, I40IW_CQPSQ_RESUMEQP_QPID
) |
3885 LS_64(I40IW_CQP_OP_RESUME_QP
, I40IW_CQPSQ_OPCODE
) |
3886 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3888 i40iw_insert_wqe_hdr(wqe
, header
);
3890 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "RESUME_QP WQE",
3891 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3893 i40iw_sc_cqp_post_sq(cqp
);
3898 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3899 * @cqp: struct for cqp hw
3900 * @scratch: u64 saved to be used during cqp completion
3901 * @hmc_fn_id: hmc function id
3902 * @post_sq: flag for cqp db to ring
3903 * @poll_registers: flag to poll register for cqp completion
3905 enum i40iw_status_code
i40iw_sc_static_hmc_pages_allocated(
3906 struct i40iw_sc_cqp
*cqp
,
3910 bool poll_registers
)
3914 u32 tail
, val
, error
;
3915 enum i40iw_status_code ret_code
= 0;
3917 wqe
= i40iw_sc_cqp_get_next_send_wqe(cqp
, scratch
);
3919 return I40IW_ERR_RING_FULL
;
3922 LS_64(hmc_fn_id
, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID
));
3924 header
= LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED
, I40IW_CQPSQ_OPCODE
) |
3925 LS_64(cqp
->polarity
, I40IW_CQPSQ_WQEVALID
);
3927 i40iw_insert_wqe_hdr(wqe
, header
);
3929 i40iw_debug_buf(cqp
->dev
, I40IW_DEBUG_WQE
, "SHMC_PAGES_ALLOCATED WQE",
3930 wqe
, I40IW_CQP_WQE_SIZE
* 8);
3931 i40iw_get_cqp_reg_info(cqp
, &val
, &tail
, &error
);
3933 ret_code
= I40IW_ERR_CQP_COMPL_ERROR
;
3937 i40iw_sc_cqp_post_sq(cqp
);
3939 /* check for cqp sq tail update */
3940 ret_code
= i40iw_cqp_poll_registers(cqp
, tail
, 1000);
3942 ret_code
= i40iw_sc_poll_for_cqp_op_done(cqp
,
3943 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED
,
3951 * i40iw_ring_full - check if cqp ring is full
3952 * @cqp: struct for cqp hw
3954 static bool i40iw_ring_full(struct i40iw_sc_cqp
*cqp
)
3956 return I40IW_RING_FULL_ERR(cqp
->sq_ring
);
3960 * i40iw_est_sd - returns approximate number of SDs for HMC
3961 * @dev: sc device struct
3962 * @hmc_info: hmc structure, size and count for HMC objects
3964 static u64
i40iw_est_sd(struct i40iw_sc_dev
*dev
, struct i40iw_hmc_info
*hmc_info
)
3970 for (i
= I40IW_HMC_IW_QP
; i
< I40IW_HMC_IW_PBLE
; i
++)
3971 size
+= hmc_info
->hmc_obj
[i
].cnt
* hmc_info
->hmc_obj
[i
].size
;
3974 size
+= hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
* hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].size
;
3976 if (size
& 0x1FFFFF)
3977 sd
= (size
>> 21) + 1; /* add 1 for remainder */
3982 /* 2MB alignment for VF PBLE HMC */
3983 size
= hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
* hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].size
;
3984 if (size
& 0x1FFFFF)
3985 sd
+= (size
>> 21) + 1; /* add 1 for remainder */
3994 * i40iw_config_fpm_values - configure HMC objects
3995 * @dev: sc device struct
3996 * @qp_count: desired qp count
3998 enum i40iw_status_code
i40iw_config_fpm_values(struct i40iw_sc_dev
*dev
, u32 qp_count
)
4000 struct i40iw_virt_mem virt_mem
;
4002 u32 qpwantedoriginal
, qpwanted
, mrwanted
, pblewanted
;
4006 struct i40iw_hmc_info
*hmc_info
;
4007 struct i40iw_hmc_fpm_misc
*hmc_fpm_misc
;
4008 enum i40iw_status_code ret_code
= 0;
4010 hmc_info
= dev
->hmc_info
;
4011 hmc_fpm_misc
= &dev
->hmc_fpm_misc
;
4013 ret_code
= i40iw_sc_init_iw_hmc(dev
, dev
->hmc_fn_id
);
4015 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4016 "i40iw_sc_init_iw_hmc returned error_code = %d\n",
4021 for (i
= I40IW_HMC_IW_QP
; i
< I40IW_HMC_IW_MAX
; i
++)
4022 hmc_info
->hmc_obj
[i
].cnt
= hmc_info
->hmc_obj
[i
].max_cnt
;
4023 sd_needed
= i40iw_est_sd(dev
, hmc_info
);
4024 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4025 "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
4026 __func__
, sd_needed
, hmc_info
->first_sd_index
);
4027 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4028 "%s: sd count %d where max sd is %d\n",
4029 __func__
, hmc_info
->sd_table
.sd_cnt
,
4030 hmc_fpm_misc
->max_sds
);
4032 qpwanted
= min(qp_count
, hmc_info
->hmc_obj
[I40IW_HMC_IW_QP
].max_cnt
);
4033 qpwantedoriginal
= qpwanted
;
4034 mrwanted
= hmc_info
->hmc_obj
[I40IW_HMC_IW_MR
].max_cnt
;
4035 pblewanted
= hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].max_cnt
;
4037 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4038 "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
4039 qp_count
, hmc_fpm_misc
->max_sds
,
4040 hmc_info
->hmc_obj
[I40IW_HMC_IW_QP
].max_cnt
,
4041 hmc_info
->hmc_obj
[I40IW_HMC_IW_CQ
].max_cnt
,
4042 hmc_info
->hmc_obj
[I40IW_HMC_IW_MR
].max_cnt
,
4043 hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].max_cnt
);
4047 hmc_info
->hmc_obj
[I40IW_HMC_IW_QP
].cnt
= qpwanted
;
4048 hmc_info
->hmc_obj
[I40IW_HMC_IW_CQ
].cnt
=
4049 min(2 * qpwanted
, hmc_info
->hmc_obj
[I40IW_HMC_IW_CQ
].cnt
);
4050 hmc_info
->hmc_obj
[I40IW_HMC_IW_SRQ
].cnt
= 0x00; /* Reserved */
4051 hmc_info
->hmc_obj
[I40IW_HMC_IW_HTE
].cnt
=
4052 qpwanted
* hmc_fpm_misc
->ht_multiplier
;
4053 hmc_info
->hmc_obj
[I40IW_HMC_IW_ARP
].cnt
=
4054 hmc_info
->hmc_obj
[I40IW_HMC_IW_ARP
].max_cnt
;
4055 hmc_info
->hmc_obj
[I40IW_HMC_IW_APBVT_ENTRY
].cnt
= 1;
4056 hmc_info
->hmc_obj
[I40IW_HMC_IW_MR
].cnt
= mrwanted
;
4058 hmc_info
->hmc_obj
[I40IW_HMC_IW_XF
].cnt
=
4059 roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES
* qpwanted
);
4060 hmc_info
->hmc_obj
[I40IW_HMC_IW_Q1
].cnt
=
4061 roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE
* qpwanted
);
4062 hmc_info
->hmc_obj
[I40IW_HMC_IW_XFFL
].cnt
=
4063 hmc_info
->hmc_obj
[I40IW_HMC_IW_XF
].cnt
/ hmc_fpm_misc
->xf_block_size
;
4064 hmc_info
->hmc_obj
[I40IW_HMC_IW_Q1FL
].cnt
=
4065 hmc_info
->hmc_obj
[I40IW_HMC_IW_Q1
].cnt
/ hmc_fpm_misc
->q1_block_size
;
4066 hmc_info
->hmc_obj
[I40IW_HMC_IW_TIMER
].cnt
=
4067 ((qpwanted
) / 512 + 1) * hmc_fpm_misc
->timer_bucket
;
4068 hmc_info
->hmc_obj
[I40IW_HMC_IW_FSIMC
].cnt
= 0x00;
4069 hmc_info
->hmc_obj
[I40IW_HMC_IW_FSIAV
].cnt
= 0x00;
4070 hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
= pblewanted
;
4072 /* How much memory is needed for all the objects. */
4073 sd_needed
= i40iw_est_sd(dev
, hmc_info
);
4074 if ((loop_count
> 1000) ||
4075 ((!(loop_count
% 10)) &&
4076 (qpwanted
> qpwantedoriginal
* 2 / 3))) {
4077 if (qpwanted
> FPM_MULTIPLIER
)
4078 qpwanted
= roundup_pow_of_two(qpwanted
-
4082 if (mrwanted
> FPM_MULTIPLIER
* 10)
4083 mrwanted
-= FPM_MULTIPLIER
* 10;
4084 if (pblewanted
> FPM_MULTIPLIER
* 1000)
4085 pblewanted
-= FPM_MULTIPLIER
* 1000;
4086 } while (sd_needed
> hmc_fpm_misc
->max_sds
&& loop_count
< 2000);
4088 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4089 "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
4090 loop_count
, sd_needed
,
4091 hmc_info
->hmc_obj
[I40IW_HMC_IW_QP
].cnt
,
4092 hmc_info
->hmc_obj
[I40IW_HMC_IW_CQ
].cnt
,
4093 hmc_info
->hmc_obj
[I40IW_HMC_IW_MR
].cnt
,
4094 hmc_info
->hmc_obj
[I40IW_HMC_IW_PBLE
].cnt
);
4096 ret_code
= i40iw_sc_configure_iw_fpm(dev
, dev
->hmc_fn_id
);
4098 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4099 "configure_iw_fpm returned error_code[x%08X]\n",
4100 i40iw_rd32(dev
->hw
, dev
->is_pf
? I40E_PFPE_CQPERRCODES
: I40E_VFPE_CQPERRCODES1
));
4104 mem_size
= sizeof(struct i40iw_hmc_sd_entry
) *
4105 (hmc_info
->sd_table
.sd_cnt
+ hmc_info
->first_sd_index
+ 1);
4106 ret_code
= i40iw_allocate_virt_mem(dev
->hw
, &virt_mem
, mem_size
);
4108 i40iw_debug(dev
, I40IW_DEBUG_HMC
,
4109 "%s: failed to allocate memory for sd_entry buffer\n",
4113 hmc_info
->sd_table
.sd_entry
= virt_mem
.va
;
4119 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
4121 * @pcmdinfo: cqp command info
4123 static enum i40iw_status_code
i40iw_exec_cqp_cmd(struct i40iw_sc_dev
*dev
,
4124 struct cqp_commands_info
*pcmdinfo
)
4126 enum i40iw_status_code status
;
4127 struct i40iw_dma_mem values_mem
;
4129 dev
->cqp_cmd_stats
[pcmdinfo
->cqp_cmd
]++;
4130 switch (pcmdinfo
->cqp_cmd
) {
4131 case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY
:
4132 status
= i40iw_sc_del_local_mac_ipaddr_entry(
4133 pcmdinfo
->in
.u
.del_local_mac_ipaddr_entry
.cqp
,
4134 pcmdinfo
->in
.u
.del_local_mac_ipaddr_entry
.scratch
,
4135 pcmdinfo
->in
.u
.del_local_mac_ipaddr_entry
.entry_idx
,
4136 pcmdinfo
->in
.u
.del_local_mac_ipaddr_entry
.ignore_ref_count
,
4139 case OP_CEQ_DESTROY
:
4140 status
= i40iw_sc_ceq_destroy(pcmdinfo
->in
.u
.ceq_destroy
.ceq
,
4141 pcmdinfo
->in
.u
.ceq_destroy
.scratch
,
4144 case OP_AEQ_DESTROY
:
4145 status
= i40iw_sc_aeq_destroy(pcmdinfo
->in
.u
.aeq_destroy
.aeq
,
4146 pcmdinfo
->in
.u
.aeq_destroy
.scratch
,
4150 case OP_DELETE_ARP_CACHE_ENTRY
:
4151 status
= i40iw_sc_del_arp_cache_entry(
4152 pcmdinfo
->in
.u
.del_arp_cache_entry
.cqp
,
4153 pcmdinfo
->in
.u
.del_arp_cache_entry
.scratch
,
4154 pcmdinfo
->in
.u
.del_arp_cache_entry
.arp_index
,
4157 case OP_MANAGE_APBVT_ENTRY
:
4158 status
= i40iw_sc_manage_apbvt_entry(
4159 pcmdinfo
->in
.u
.manage_apbvt_entry
.cqp
,
4160 &pcmdinfo
->in
.u
.manage_apbvt_entry
.info
,
4161 pcmdinfo
->in
.u
.manage_apbvt_entry
.scratch
,
4165 status
= i40iw_sc_ceq_create(pcmdinfo
->in
.u
.ceq_create
.ceq
,
4166 pcmdinfo
->in
.u
.ceq_create
.scratch
,
4170 status
= i40iw_sc_aeq_create(pcmdinfo
->in
.u
.aeq_create
.aeq
,
4171 pcmdinfo
->in
.u
.aeq_create
.scratch
,
4174 case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY
:
4175 status
= i40iw_sc_alloc_local_mac_ipaddr_entry(
4176 pcmdinfo
->in
.u
.alloc_local_mac_ipaddr_entry
.cqp
,
4177 pcmdinfo
->in
.u
.alloc_local_mac_ipaddr_entry
.scratch
,
4180 case OP_ADD_LOCAL_MAC_IPADDR_ENTRY
:
4181 status
= i40iw_sc_add_local_mac_ipaddr_entry(
4182 pcmdinfo
->in
.u
.add_local_mac_ipaddr_entry
.cqp
,
4183 &pcmdinfo
->in
.u
.add_local_mac_ipaddr_entry
.info
,
4184 pcmdinfo
->in
.u
.add_local_mac_ipaddr_entry
.scratch
,
4187 case OP_MANAGE_QHASH_TABLE_ENTRY
:
4188 status
= i40iw_sc_manage_qhash_table_entry(
4189 pcmdinfo
->in
.u
.manage_qhash_table_entry
.cqp
,
4190 &pcmdinfo
->in
.u
.manage_qhash_table_entry
.info
,
4191 pcmdinfo
->in
.u
.manage_qhash_table_entry
.scratch
,
4196 status
= i40iw_sc_qp_modify(
4197 pcmdinfo
->in
.u
.qp_modify
.qp
,
4198 &pcmdinfo
->in
.u
.qp_modify
.info
,
4199 pcmdinfo
->in
.u
.qp_modify
.scratch
,
4203 case OP_QP_UPLOAD_CONTEXT
:
4204 status
= i40iw_sc_qp_upload_context(
4205 pcmdinfo
->in
.u
.qp_upload_context
.dev
,
4206 &pcmdinfo
->in
.u
.qp_upload_context
.info
,
4207 pcmdinfo
->in
.u
.qp_upload_context
.scratch
,
4212 status
= i40iw_sc_cq_create(
4213 pcmdinfo
->in
.u
.cq_create
.cq
,
4214 pcmdinfo
->in
.u
.cq_create
.scratch
,
4215 pcmdinfo
->in
.u
.cq_create
.check_overflow
,
4219 status
= i40iw_sc_cq_destroy(
4220 pcmdinfo
->in
.u
.cq_destroy
.cq
,
4221 pcmdinfo
->in
.u
.cq_destroy
.scratch
,
4226 status
= i40iw_sc_qp_create(
4227 pcmdinfo
->in
.u
.qp_create
.qp
,
4228 &pcmdinfo
->in
.u
.qp_create
.info
,
4229 pcmdinfo
->in
.u
.qp_create
.scratch
,
4233 status
= i40iw_sc_qp_destroy(
4234 pcmdinfo
->in
.u
.qp_destroy
.qp
,
4235 pcmdinfo
->in
.u
.qp_destroy
.scratch
,
4236 pcmdinfo
->in
.u
.qp_destroy
.remove_hash_idx
,
4237 pcmdinfo
->in
.u
.qp_destroy
.
4243 status
= i40iw_sc_alloc_stag(
4244 pcmdinfo
->in
.u
.alloc_stag
.dev
,
4245 &pcmdinfo
->in
.u
.alloc_stag
.info
,
4246 pcmdinfo
->in
.u
.alloc_stag
.scratch
,
4249 case OP_MR_REG_NON_SHARED
:
4250 status
= i40iw_sc_mr_reg_non_shared(
4251 pcmdinfo
->in
.u
.mr_reg_non_shared
.dev
,
4252 &pcmdinfo
->in
.u
.mr_reg_non_shared
.info
,
4253 pcmdinfo
->in
.u
.mr_reg_non_shared
.scratch
,
4257 case OP_DEALLOC_STAG
:
4258 status
= i40iw_sc_dealloc_stag(
4259 pcmdinfo
->in
.u
.dealloc_stag
.dev
,
4260 &pcmdinfo
->in
.u
.dealloc_stag
.info
,
4261 pcmdinfo
->in
.u
.dealloc_stag
.scratch
,
4266 status
= i40iw_sc_mw_alloc(
4267 pcmdinfo
->in
.u
.mw_alloc
.dev
,
4268 pcmdinfo
->in
.u
.mw_alloc
.scratch
,
4269 pcmdinfo
->in
.u
.mw_alloc
.mw_stag_index
,
4270 pcmdinfo
->in
.u
.mw_alloc
.pd_id
,
4274 case OP_QP_FLUSH_WQES
:
4275 status
= i40iw_sc_qp_flush_wqes(
4276 pcmdinfo
->in
.u
.qp_flush_wqes
.qp
,
4277 &pcmdinfo
->in
.u
.qp_flush_wqes
.info
,
4278 pcmdinfo
->in
.u
.qp_flush_wqes
.
4279 scratch
, pcmdinfo
->post_sq
);
4282 status
= i40iw_sc_gen_ae(
4283 pcmdinfo
->in
.u
.gen_ae
.qp
,
4284 &pcmdinfo
->in
.u
.gen_ae
.info
,
4285 pcmdinfo
->in
.u
.gen_ae
.scratch
,
4288 case OP_ADD_ARP_CACHE_ENTRY
:
4289 status
= i40iw_sc_add_arp_cache_entry(
4290 pcmdinfo
->in
.u
.add_arp_cache_entry
.cqp
,
4291 &pcmdinfo
->in
.u
.add_arp_cache_entry
.info
,
4292 pcmdinfo
->in
.u
.add_arp_cache_entry
.scratch
,
4295 case OP_MANAGE_PUSH_PAGE
:
4296 status
= i40iw_sc_manage_push_page(
4297 pcmdinfo
->in
.u
.manage_push_page
.cqp
,
4298 &pcmdinfo
->in
.u
.manage_push_page
.info
,
4299 pcmdinfo
->in
.u
.manage_push_page
.scratch
,
4302 case OP_UPDATE_PE_SDS
:
4303 /* case I40IW_CQP_OP_UPDATE_PE_SDS */
4304 status
= i40iw_update_pe_sds(
4305 pcmdinfo
->in
.u
.update_pe_sds
.dev
,
4306 &pcmdinfo
->in
.u
.update_pe_sds
.info
,
4307 pcmdinfo
->in
.u
.update_pe_sds
.
4311 case OP_MANAGE_HMC_PM_FUNC_TABLE
:
4312 status
= i40iw_sc_manage_hmc_pm_func_table(
4313 pcmdinfo
->in
.u
.manage_hmc_pm
.dev
->cqp
,
4314 pcmdinfo
->in
.u
.manage_hmc_pm
.scratch
,
4315 (u8
)pcmdinfo
->in
.u
.manage_hmc_pm
.info
.vf_id
,
4316 pcmdinfo
->in
.u
.manage_hmc_pm
.info
.free_fcn
,
4320 status
= i40iw_sc_suspend_qp(
4321 pcmdinfo
->in
.u
.suspend_resume
.cqp
,
4322 pcmdinfo
->in
.u
.suspend_resume
.qp
,
4323 pcmdinfo
->in
.u
.suspend_resume
.scratch
);
4326 status
= i40iw_sc_resume_qp(
4327 pcmdinfo
->in
.u
.suspend_resume
.cqp
,
4328 pcmdinfo
->in
.u
.suspend_resume
.qp
,
4329 pcmdinfo
->in
.u
.suspend_resume
.scratch
);
4331 case OP_MANAGE_VF_PBLE_BP
:
4332 status
= i40iw_manage_vf_pble_bp(
4333 pcmdinfo
->in
.u
.manage_vf_pble_bp
.cqp
,
4334 &pcmdinfo
->in
.u
.manage_vf_pble_bp
.info
,
4335 pcmdinfo
->in
.u
.manage_vf_pble_bp
.scratch
, true);
4337 case OP_QUERY_FPM_VALUES
:
4338 values_mem
.pa
= pcmdinfo
->in
.u
.query_fpm_values
.fpm_values_pa
;
4339 values_mem
.va
= pcmdinfo
->in
.u
.query_fpm_values
.fpm_values_va
;
4340 status
= i40iw_sc_query_fpm_values(
4341 pcmdinfo
->in
.u
.query_fpm_values
.cqp
,
4342 pcmdinfo
->in
.u
.query_fpm_values
.scratch
,
4343 pcmdinfo
->in
.u
.query_fpm_values
.hmc_fn_id
,
4344 &values_mem
, true, I40IW_CQP_WAIT_EVENT
);
4346 case OP_COMMIT_FPM_VALUES
:
4347 values_mem
.pa
= pcmdinfo
->in
.u
.commit_fpm_values
.fpm_values_pa
;
4348 values_mem
.va
= pcmdinfo
->in
.u
.commit_fpm_values
.fpm_values_va
;
4349 status
= i40iw_sc_commit_fpm_values(
4350 pcmdinfo
->in
.u
.commit_fpm_values
.cqp
,
4351 pcmdinfo
->in
.u
.commit_fpm_values
.scratch
,
4352 pcmdinfo
->in
.u
.commit_fpm_values
.hmc_fn_id
,
4355 I40IW_CQP_WAIT_EVENT
);
4357 case OP_QUERY_RDMA_FEATURES
:
4358 values_mem
.pa
= pcmdinfo
->in
.u
.query_rdma_features
.cap_pa
;
4359 values_mem
.va
= pcmdinfo
->in
.u
.query_rdma_features
.cap_va
;
4360 status
= i40iw_sc_query_rdma_features(
4361 pcmdinfo
->in
.u
.query_rdma_features
.cqp
, &values_mem
,
4362 pcmdinfo
->in
.u
.query_rdma_features
.scratch
);
4365 status
= I40IW_NOT_SUPPORTED
;
4373 * i40iw_process_cqp_cmd - process all cqp commands
4374 * @dev: sc device struct
4375 * @pcmdinfo: cqp command info
4377 enum i40iw_status_code
i40iw_process_cqp_cmd(struct i40iw_sc_dev
*dev
,
4378 struct cqp_commands_info
*pcmdinfo
)
4380 enum i40iw_status_code status
= 0;
4381 unsigned long flags
;
4383 spin_lock_irqsave(&dev
->cqp_lock
, flags
);
4384 if (list_empty(&dev
->cqp_cmd_head
) && !i40iw_ring_full(dev
->cqp
))
4385 status
= i40iw_exec_cqp_cmd(dev
, pcmdinfo
);
4387 list_add_tail(&pcmdinfo
->cqp_cmd_entry
, &dev
->cqp_cmd_head
);
4388 spin_unlock_irqrestore(&dev
->cqp_lock
, flags
);
4393 * i40iw_process_bh - called from tasklet for cqp list
4394 * @dev: sc device struct
4396 enum i40iw_status_code
i40iw_process_bh(struct i40iw_sc_dev
*dev
)
4398 enum i40iw_status_code status
= 0;
4399 struct cqp_commands_info
*pcmdinfo
;
4400 unsigned long flags
;
4402 spin_lock_irqsave(&dev
->cqp_lock
, flags
);
4403 while (!list_empty(&dev
->cqp_cmd_head
) && !i40iw_ring_full(dev
->cqp
)) {
4404 pcmdinfo
= (struct cqp_commands_info
*)i40iw_remove_head(&dev
->cqp_cmd_head
);
4406 status
= i40iw_exec_cqp_cmd(dev
, pcmdinfo
);
4410 spin_unlock_irqrestore(&dev
->cqp_lock
, flags
);
4415 * i40iw_iwarp_opcode - determine if incoming is rdma layer
4416 * @info: aeq info for the packet
4417 * @pkt: packet for error
4419 static u32
i40iw_iwarp_opcode(struct i40iw_aeqe_info
*info
, u8
*pkt
)
4422 u32 opcode
= 0xffffffff;
4424 if (info
->q2_data_written
) {
4425 mpa
= (__be16
*)pkt
;
4426 opcode
= ntohs(mpa
[1]) & 0xf;
4432 * i40iw_locate_mpa - return pointer to mpa in the pkt
4433 * @pkt: packet with data
4435 static u8
*i40iw_locate_mpa(u8
*pkt
)
4437 /* skip over ethernet header */
4438 pkt
+= I40IW_MAC_HLEN
;
4440 /* Skip over IP and TCP headers */
4441 pkt
+= 4 * (pkt
[0] & 0x0f);
4442 pkt
+= 4 * ((pkt
[12] >> 4) & 0x0f);
4447 * i40iw_setup_termhdr - termhdr for terminate pkt
4448 * @qp: sc qp ptr for pkt
4450 * @opcode: flush opcode for termhdr
4451 * @layer_etype: error layer + error type
4452 * @err: error cod ein the header
4454 static void i40iw_setup_termhdr(struct i40iw_sc_qp
*qp
,
4455 struct i40iw_terminate_hdr
*hdr
,
4456 enum i40iw_flush_opcode opcode
,
4460 qp
->flush_code
= opcode
;
4461 hdr
->layer_etype
= layer_etype
;
4462 hdr
->error_code
= err
;
4466 * i40iw_bld_terminate_hdr - build terminate message header
4467 * @qp: qp associated with received terminate AE
4468 * @info: the struct contiaing AE information
4470 static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp
*qp
,
4471 struct i40iw_aeqe_info
*info
)
4473 u8
*pkt
= qp
->q2_buf
+ Q2_BAD_FRAME_OFFSET
;
4478 struct i40iw_terminate_hdr
*termhdr
;
4480 termhdr
= (struct i40iw_terminate_hdr
*)qp
->q2_buf
;
4481 memset(termhdr
, 0, Q2_BAD_FRAME_OFFSET
);
4483 if (info
->q2_data_written
) {
4484 /* Use data from offending packet to fill in ddp & rdma hdrs */
4485 pkt
= i40iw_locate_mpa(pkt
);
4486 ddp_seg_len
= ntohs(*(__be16
*)pkt
);
4489 termhdr
->hdrct
= DDP_LEN_FLAG
;
4490 if (pkt
[2] & 0x80) {
4492 if (ddp_seg_len
>= TERM_DDP_LEN_TAGGED
) {
4493 copy_len
+= TERM_DDP_LEN_TAGGED
;
4494 termhdr
->hdrct
|= DDP_HDR_FLAG
;
4497 if (ddp_seg_len
>= TERM_DDP_LEN_UNTAGGED
) {
4498 copy_len
+= TERM_DDP_LEN_UNTAGGED
;
4499 termhdr
->hdrct
|= DDP_HDR_FLAG
;
4502 if (ddp_seg_len
>= (TERM_DDP_LEN_UNTAGGED
+ TERM_RDMA_LEN
)) {
4503 if ((pkt
[3] & RDMA_OPCODE_MASK
) == RDMA_READ_REQ_OPCODE
) {
4504 copy_len
+= TERM_RDMA_LEN
;
4505 termhdr
->hdrct
|= RDMA_HDR_FLAG
;
4512 opcode
= i40iw_iwarp_opcode(info
, pkt
);
4514 switch (info
->ae_id
) {
4515 case I40IW_AE_AMP_UNALLOCATED_STAG
:
4516 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4517 if (opcode
== I40IW_OP_TYPE_RDMA_WRITE
)
4518 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_PROT_ERR
,
4519 (LAYER_DDP
<< 4) | DDP_TAGGED_BUFFER
, DDP_TAGGED_INV_STAG
);
4521 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4522 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_INV_STAG
);
4524 case I40IW_AE_AMP_BOUNDS_VIOLATION
:
4525 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4526 if (info
->q2_data_written
)
4527 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_PROT_ERR
,
4528 (LAYER_DDP
<< 4) | DDP_TAGGED_BUFFER
, DDP_TAGGED_BOUNDS
);
4530 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4531 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_INV_BOUNDS
);
4533 case I40IW_AE_AMP_BAD_PD
:
4535 case I40IW_OP_TYPE_RDMA_WRITE
:
4536 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_PROT_ERR
,
4537 (LAYER_DDP
<< 4) | DDP_TAGGED_BUFFER
, DDP_TAGGED_UNASSOC_STAG
);
4539 case I40IW_OP_TYPE_SEND_INV
:
4540 case I40IW_OP_TYPE_SEND_SOL_INV
:
4541 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4542 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_CANT_INV_STAG
);
4545 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4546 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_UNASSOC_STAG
);
4549 case I40IW_AE_AMP_INVALID_STAG
:
4550 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4551 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4552 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_INV_STAG
);
4554 case I40IW_AE_AMP_BAD_QP
:
4555 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_LOC_QP_OP_ERR
,
4556 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_QN
);
4558 case I40IW_AE_AMP_BAD_STAG_KEY
:
4559 case I40IW_AE_AMP_BAD_STAG_INDEX
:
4560 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4562 case I40IW_OP_TYPE_SEND_INV
:
4563 case I40IW_OP_TYPE_SEND_SOL_INV
:
4564 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_OP_ERR
,
4565 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_OP
, RDMAP_CANT_INV_STAG
);
4568 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4569 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_OP
, RDMAP_INV_STAG
);
4572 case I40IW_AE_AMP_RIGHTS_VIOLATION
:
4573 case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS
:
4574 case I40IW_AE_PRIV_OPERATION_DENIED
:
4575 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4576 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4577 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_ACCESS
);
4579 case I40IW_AE_AMP_TO_WRAP
:
4580 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4581 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_ACCESS_ERR
,
4582 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_PROT
, RDMAP_TO_WRAP
);
4584 case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR
:
4585 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4586 (LAYER_MPA
<< 4) | DDP_LLP
, MPA_CRC
);
4588 case I40IW_AE_LLP_SEGMENT_TOO_LARGE
:
4589 case I40IW_AE_LLP_SEGMENT_TOO_SMALL
:
4590 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_LOC_LEN_ERR
,
4591 (LAYER_DDP
<< 4) | DDP_CATASTROPHIC
, DDP_CATASTROPHIC_LOCAL
);
4593 case I40IW_AE_LCE_QP_CATASTROPHIC
:
4594 case I40IW_AE_DDP_NO_L_BIT
:
4595 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_FATAL_ERR
,
4596 (LAYER_DDP
<< 4) | DDP_CATASTROPHIC
, DDP_CATASTROPHIC_LOCAL
);
4598 case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN
:
4599 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4600 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_MSN_RANGE
);
4602 case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER
:
4603 qp
->eventtype
= TERM_EVENT_QP_ACCESS_ERR
;
4604 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_LOC_LEN_ERR
,
4605 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_TOO_LONG
);
4607 case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION
:
4609 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4610 (LAYER_DDP
<< 4) | DDP_TAGGED_BUFFER
, DDP_TAGGED_INV_DDP_VER
);
4612 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4613 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_DDP_VER
);
4615 case I40IW_AE_DDP_UBE_INVALID_MO
:
4616 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4617 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_MO
);
4619 case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE
:
4620 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_REM_OP_ERR
,
4621 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_MSN_NO_BUF
);
4623 case I40IW_AE_DDP_UBE_INVALID_QN
:
4624 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4625 (LAYER_DDP
<< 4) | DDP_UNTAGGED_BUFFER
, DDP_UNTAGGED_INV_QN
);
4627 case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION
:
4628 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_GENERAL_ERR
,
4629 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_OP
, RDMAP_INV_RDMAP_VER
);
4631 case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE
:
4632 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_LOC_QP_OP_ERR
,
4633 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_OP
, RDMAP_UNEXPECTED_OP
);
4636 i40iw_setup_termhdr(qp
, termhdr
, FLUSH_FATAL_ERR
,
4637 (LAYER_RDMA
<< 4) | RDMAP_REMOTE_OP
, RDMAP_UNSPECIFIED
);
4642 memcpy(termhdr
+ 1, pkt
, copy_len
);
4644 return sizeof(struct i40iw_terminate_hdr
) + copy_len
;
/**
 * i40iw_terminate_send_fin() - Send fin for terminate message
 * @qp: qp associated with received terminate AE
 */
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
{
	/* Send the fin only (no terminate payload, hence length 0) */
	/* NOTE(review): final length argument reconstructed as 0 — garbled in source; confirm */
	i40iw_term_modify_qp(qp,
			     I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_FIN_ONLY,
			     0);
}
/**
 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 termlen = 0;

	if (qp->term_flags & I40IW_TERM_SENT)
		return;		/* Sanity check: terminate already in flight */

	/* Eventtype can change from bld_terminate_hdr */
	qp->eventtype = TERM_EVENT_QP_FATAL;
	/* Build the terminate header in q2 and get its total length */
	termlen = i40iw_bld_terminate_hdr(qp, info);
	i40iw_terminate_start_timer(qp);
	/* Mark as sent before issuing the modify so a re-entry bails early */
	qp->term_flags |= I40IW_TERM_SENT;
	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
}
/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (__be32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		/* DDP control: tagged flag/reserved bits must be 0b01xx.. pattern */
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
	/* Terminate header follows the 5 32-bit MPA/DDP control words */
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		/* Remote-fault terminate: no FIN exchange needed */
		i40iw_terminate_done(qp, 0);
	} else {
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}
/**
 * i40iw_sc_vsi_init - Initialize virtual device
 * @vsi: pointer to the vsi structure
 * @info: parameters to initialize vsi
 */
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
{
	int i;

	vsi->dev = info->dev;
	vsi->back_vsi = info->back_vsi;
	vsi->mtu = info->params->mtu;
	vsi->exception_lan_queue = info->exception_lan_queue;
	i40iw_fill_qos_list(info->params->qs_handle_list);

	/* Set up one qset handle, lock, and QP list per user priority */
	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
		i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
			    vsi->qos[i].qs_handle);
		spin_lock_init(&vsi->qos[i].lock);
		INIT_LIST_HEAD(&vsi->qos[i].qplist);
	}
}
4758 * i40iw_hw_stats_init - Initiliaze HW stats table
4759 * @stats: pestat struct
4760 * @fcn_idx: PCI fn id
4761 * @is_pf: Is it a PF?
4763 * Populate the HW stats table with register offset addr for each
4764 * stats. And start the perioidic stats timer.
4766 void i40iw_hw_stats_init(struct i40iw_vsi_pestat
*stats
, u8 fcn_idx
, bool is_pf
)
4768 u32 stats_reg_offset
;
4770 struct i40iw_dev_hw_stats_offsets
*stats_table
=
4771 &stats
->hw_stats_offsets
;
4772 struct i40iw_dev_hw_stats
*last_rd_stats
= &stats
->last_read_hw_stats
;
4775 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4RXDISCARD
] =
4776 I40E_GLPES_PFIP4RXDISCARD(fcn_idx
);
4777 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4RXTRUNC
] =
4778 I40E_GLPES_PFIP4RXTRUNC(fcn_idx
);
4779 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4TXNOROUTE
] =
4780 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx
);
4781 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6RXDISCARD
] =
4782 I40E_GLPES_PFIP6RXDISCARD(fcn_idx
);
4783 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6RXTRUNC
] =
4784 I40E_GLPES_PFIP6RXTRUNC(fcn_idx
);
4785 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6TXNOROUTE
] =
4786 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx
);
4787 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRTXSEG
] =
4788 I40E_GLPES_PFTCPRTXSEG(fcn_idx
);
4789 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRXOPTERR
] =
4790 I40E_GLPES_PFTCPRXOPTERR(fcn_idx
);
4791 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRXPROTOERR
] =
4792 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx
);
4794 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXOCTS
] =
4795 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx
);
4796 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXPKTS
] =
4797 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx
);
4798 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXFRAGS
] =
4799 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx
);
4800 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXMCPKTS
] =
4801 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx
);
4802 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXOCTS
] =
4803 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx
);
4804 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXPKTS
] =
4805 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx
);
4806 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXFRAGS
] =
4807 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx
);
4808 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXMCPKTS
] =
4809 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx
);
4810 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXOCTS
] =
4811 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx
);
4812 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXPKTS
] =
4813 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx
);
4814 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXFRAGS
] =
4815 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx
);
4816 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXMCPKTS
] =
4817 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx
);
4818 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXOCTS
] =
4819 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx
);
4820 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXPKTS
] =
4821 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx
);
4822 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXPKTS
] =
4823 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx
);
4824 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXFRAGS
] =
4825 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx
);
4826 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_TCPRXSEGS
] =
4827 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx
);
4828 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_TCPTXSEG
] =
4829 I40E_GLPES_PFTCPTXSEGLO(fcn_idx
);
4830 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXRDS
] =
4831 I40E_GLPES_PFRDMARXRDSLO(fcn_idx
);
4832 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXSNDS
] =
4833 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx
);
4834 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXWRS
] =
4835 I40E_GLPES_PFRDMARXWRSLO(fcn_idx
);
4836 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXRDS
] =
4837 I40E_GLPES_PFRDMATXRDSLO(fcn_idx
);
4838 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXSNDS
] =
4839 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx
);
4840 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXWRS
] =
4841 I40E_GLPES_PFRDMATXWRSLO(fcn_idx
);
4842 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMAVBND
] =
4843 I40E_GLPES_PFRDMAVBNDLO(fcn_idx
);
4844 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMAVINV
] =
4845 I40E_GLPES_PFRDMAVINVLO(fcn_idx
);
4847 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4RXDISCARD
] =
4848 I40E_GLPES_VFIP4RXDISCARD(fcn_idx
);
4849 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4RXTRUNC
] =
4850 I40E_GLPES_VFIP4RXTRUNC(fcn_idx
);
4851 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP4TXNOROUTE
] =
4852 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx
);
4853 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6RXDISCARD
] =
4854 I40E_GLPES_VFIP6RXDISCARD(fcn_idx
);
4855 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6RXTRUNC
] =
4856 I40E_GLPES_VFIP6RXTRUNC(fcn_idx
);
4857 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_IP6TXNOROUTE
] =
4858 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx
);
4859 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRTXSEG
] =
4860 I40E_GLPES_VFTCPRTXSEG(fcn_idx
);
4861 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRXOPTERR
] =
4862 I40E_GLPES_VFTCPRXOPTERR(fcn_idx
);
4863 stats_table
->stats_offset_32
[I40IW_HW_STAT_INDEX_TCPRXPROTOERR
] =
4864 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx
);
4866 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXOCTS
] =
4867 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx
);
4868 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXPKTS
] =
4869 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx
);
4870 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXFRAGS
] =
4871 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx
);
4872 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4RXMCPKTS
] =
4873 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx
);
4874 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXOCTS
] =
4875 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx
);
4876 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXPKTS
] =
4877 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx
);
4878 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXFRAGS
] =
4879 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx
);
4880 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP4TXMCPKTS
] =
4881 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx
);
4882 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXOCTS
] =
4883 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx
);
4884 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXPKTS
] =
4885 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx
);
4886 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXFRAGS
] =
4887 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx
);
4888 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6RXMCPKTS
] =
4889 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx
);
4890 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXOCTS
] =
4891 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx
);
4892 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXPKTS
] =
4893 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx
);
4894 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXPKTS
] =
4895 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx
);
4896 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_IP6TXFRAGS
] =
4897 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx
);
4898 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_TCPRXSEGS
] =
4899 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx
);
4900 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_TCPTXSEG
] =
4901 I40E_GLPES_VFTCPTXSEGLO(fcn_idx
);
4902 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXRDS
] =
4903 I40E_GLPES_VFRDMARXRDSLO(fcn_idx
);
4904 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXSNDS
] =
4905 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx
);
4906 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMARXWRS
] =
4907 I40E_GLPES_VFRDMARXWRSLO(fcn_idx
);
4908 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXRDS
] =
4909 I40E_GLPES_VFRDMATXRDSLO(fcn_idx
);
4910 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXSNDS
] =
4911 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx
);
4912 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMATXWRS
] =
4913 I40E_GLPES_VFRDMATXWRSLO(fcn_idx
);
4914 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMAVBND
] =
4915 I40E_GLPES_VFRDMAVBNDLO(fcn_idx
);
4916 stats_table
->stats_offset_64
[I40IW_HW_STAT_INDEX_RDMAVINV
] =
4917 I40E_GLPES_VFRDMAVINVLO(fcn_idx
);
4920 for (stats_index
= 0; stats_index
< I40IW_HW_STAT_INDEX_MAX_64
;
4922 stats_reg_offset
= stats_table
->stats_offset_64
[stats_index
];
4923 last_rd_stats
->stats_value_64
[stats_index
] =
4924 readq(stats
->hw
->hw_addr
+ stats_reg_offset
);
4927 for (stats_index
= 0; stats_index
< I40IW_HW_STAT_INDEX_MAX_32
;
4929 stats_reg_offset
= stats_table
->stats_offset_32
[stats_index
];
4930 last_rd_stats
->stats_value_32
[stats_index
] =
4931 i40iw_rd32(stats
->hw
, stats_reg_offset
);
/**
 * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
 * @stats: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_32b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_32[index];

	new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
	/* roll-over case: counter wrapped since last read, so the new raw
	 * value is itself the delta past the wrap point
	 */
	if (new_stats_value < last_rd_stats->stats_value_32[index])
		hw_stats->stats_value_32[index] += new_stats_value;
	else
		hw_stats->stats_value_32[index] +=
			new_stats_value - last_rd_stats->stats_value_32[index];
	last_rd_stats->stats_value_32[index] = new_stats_value;
	*value = hw_stats->stats_value_32[index];
}
/**
 * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
 * @stats: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_64b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_64[index];

	new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
	/* roll-over case: counter wrapped since last read, so the new raw
	 * value is itself the delta past the wrap point
	 */
	if (new_stats_value < last_rd_stats->stats_value_64[index])
		hw_stats->stats_value_64[index] += new_stats_value;
	else
		hw_stats->stats_value_64[index] +=
			new_stats_value - last_rd_stats->stats_value_64[index];
	last_rd_stats->stats_value_64[index] = new_stats_value;
	*value = hw_stats->stats_value_64[index];
}
/**
 * i40iw_hw_stats_read_all - read all HW stat counters
 * @stats: pestat struct
 * @stats_values: hw stats structure
 *
 * Read all the HW stat counters and populates hw_stats structure
 * of passed-in vsi's pestat as well as copy created in stats_values.
 */
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
			     struct i40iw_dev_hw_stats *stats_values)
{
	u32 stats_index;
	unsigned long flags;

	/* Serialize against the periodic stats timer */
	spin_lock_irqsave(&stats->lock, flags);

	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index,
				       &stats_values->stats_value_32[stats_index]);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index,
				       &stats_values->stats_value_64[stats_index]);
	spin_unlock_irqrestore(&stats->lock, flags);
}
/**
 * i40iw_hw_stats_refresh_all - Update all HW stats structs
 * @stats: pestat struct
 *
 * Read all the HW stats counters to refresh values in hw_stats structure
 * of passed-in dev's pestat; the per-read output value is discarded.
 */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
{
	u64 stats_value;
	u32 stats_index;
	unsigned long flags;

	/* Serialize against concurrent readers of the stats tables */
	spin_lock_irqsave(&stats->lock, flags);

	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
	spin_unlock_irqrestore(&stats->lock, flags);
}
/**
 * i40iw_get_fcn_id - Return the function id
 * @dev: pointer to the device
 *
 * Claims the first free non-PF stats function slot in dev->fcn_id_array,
 * or returns I40IW_INVALID_FCN_ID when none is available.
 */
static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
{
	u8 fcn_id = I40IW_INVALID_FCN_ID;
	u8 i;

	for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
		if (!dev->fcn_id_array[i]) {
			/* NOTE(review): loop interior partially garbled in source;
			 * reconstructed as claim-first-free-slot-and-stop — confirm
			 */
			fcn_id = i;
			dev->fcn_id_array[i] = true;
			break;
		}
	return fcn_id;
}
/**
 * i40iw_vsi_stats_init - Initialize the vsi statistics
 * @vsi: pointer to the vsi structure
 * @info: The info structure used for initialization
 *
 * Return: I40IW_SUCCESS, or I40IW_ERR_NOT_READY when no stats
 * function id could be allocated.
 */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
{
	u8 fcn_id = info->fcn_id;

	if (info->alloc_fcn_id)
		fcn_id = i40iw_get_fcn_id(vsi->dev);

	if (fcn_id == I40IW_INVALID_FCN_ID)
		return I40IW_ERR_NOT_READY;

	vsi->pestat = info->pestat;
	vsi->pestat->hw = vsi->dev->hw;
	vsi->pestat->vsi = vsi;

	if (info->stats_initialize) {
		/* 'true' -> PF register layout; hedged: garble hides whether
		 * this should instead derive from dev->is_pf — confirm
		 */
		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
		spin_lock_init(&vsi->pestat->lock);
		i40iw_hw_stats_start_timer(vsi);
	}
	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
	vsi->fcn_id = fcn_id;
	return I40IW_SUCCESS;
}
/**
 * i40iw_vsi_stats_free - Free the vsi stats
 * @vsi: pointer to the vsi structure
 *
 * Releases a dynamically allocated stats function id (if any) and
 * stops the periodic stats timer.
 */
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
	u8 fcn_id = vsi->fcn_id;

	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
		vsi->dev->fcn_id_array[fcn_id] = false;
	i40iw_hw_stats_stop_timer(vsi);
}
/* CQP (control QP) operation dispatch table */
static struct i40iw_cqp_ops iw_cqp_ops = {
	.cqp_init = i40iw_sc_cqp_init,
	.cqp_create = i40iw_sc_cqp_create,
	.cqp_post_sq = i40iw_sc_cqp_post_sq,
	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
	.cqp_destroy = i40iw_sc_cqp_destroy,
	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};
/* CCQ (CQP completion queue) operation dispatch table */
static struct i40iw_ccq_ops iw_ccq_ops = {
	.ccq_init = i40iw_sc_ccq_init,
	.ccq_create = i40iw_sc_ccq_create,
	.ccq_destroy = i40iw_sc_ccq_destroy,
	.ccq_create_done = i40iw_sc_ccq_create_done,
	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
	.ccq_arm = i40iw_sc_ccq_arm
};
/* CEQ (completion event queue) operation dispatch table */
static struct i40iw_ceq_ops iw_ceq_ops = {
	.ceq_init = i40iw_sc_ceq_init,
	.ceq_create = i40iw_sc_ceq_create,
	.cceq_create_done = i40iw_sc_cceq_create_done,
	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
	.cceq_create = i40iw_sc_cceq_create,
	.ceq_destroy = i40iw_sc_ceq_destroy,
	.process_ceq = i40iw_sc_process_ceq
};
/* AEQ (asynchronous event queue) operation dispatch table */
static struct i40iw_aeq_ops iw_aeq_ops = {
	.aeq_init = i40iw_sc_aeq_init,
	.aeq_create = i40iw_sc_aeq_create,
	.aeq_destroy = i40iw_sc_aeq_destroy,
	.get_next_aeqe = i40iw_sc_get_next_aeqe,
	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
	.aeq_create_done = i40iw_sc_aeq_create_done,
	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
};
/* Protection domain operation dispatch table */
static struct i40iw_pd_ops iw_pd_ops = {
	.pd_init = i40iw_sc_pd_init,
};
/* Privileged QP operation dispatch table */
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};
/* Privileged CQ operation dispatch table */
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	.cq_init = i40iw_sc_cq_init,
	.cq_create = i40iw_sc_cq_create,
	.cq_destroy = i40iw_sc_cq_destroy,
	.cq_modify = i40iw_sc_cq_modify,
};
/* Memory region / STag operation dispatch table */
static struct i40iw_mr_ops iw_mr_ops = {
	.alloc_stag = i40iw_sc_alloc_stag,
	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
	.mr_reg_shared = i40iw_sc_mr_reg_shared,
	.dealloc_stag = i40iw_sc_dealloc_stag,
	.query_stag = i40iw_sc_query_stag,
	.mw_alloc = i40iw_sc_mw_alloc
};
/* Miscellaneous CQP operation dispatch table (ARP cache, APBVT,
 * qhash, MAC/IP table, HMC/FPM management, QP suspend/resume)
 */
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	.manage_push_page = i40iw_sc_manage_push_page,
	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
	.commit_fpm_values = i40iw_sc_commit_fpm_values,
	.query_fpm_values = i40iw_sc_query_fpm_values,
	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
	.cqp_nop = i40iw_sc_cqp_nop,
	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
	.update_suspend_qp = i40iw_sc_suspend_qp,
	.update_resume_qp = i40iw_sc_resume_qp
};
/* HMC (host memory cache) operation dispatch table */
static struct i40iw_hmc_ops iw_hmc_ops = {
	.init_iw_hmc = i40iw_sc_init_iw_hmc,
	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
	.create_hmc_object = i40iw_sc_create_hmc_obj,
	.del_hmc_object = i40iw_sc_del_hmc_obj
};
5209 * i40iw_device_init - Initialize IWARP device
5210 * @dev: IWARP device pointer
5211 * @info: IWARP init info
5213 enum i40iw_status_code
i40iw_device_init(struct i40iw_sc_dev
*dev
,
5214 struct i40iw_device_init_info
*info
)
5219 enum i40iw_status_code ret_code
= 0;
5222 spin_lock_init(&dev
->cqp_lock
);
5224 i40iw_device_init_uk(&dev
->dev_uk
);
5226 dev
->debug_mask
= info
->debug_mask
;
5228 dev
->hmc_fn_id
= info
->hmc_fn_id
;
5229 dev
->is_pf
= info
->is_pf
;
5231 dev
->fpm_query_buf_pa
= info
->fpm_query_buf_pa
;
5232 dev
->fpm_query_buf
= info
->fpm_query_buf
;
5234 dev
->fpm_commit_buf_pa
= info
->fpm_commit_buf_pa
;
5235 dev
->fpm_commit_buf
= info
->fpm_commit_buf
;
5238 dev
->hw
->hw_addr
= info
->bar0
;
5241 val
= i40iw_rd32(dev
->hw
, I40E_GLPCI_DREVID
);
5242 dev
->hw_rev
= (u8
)RS_32(val
, I40E_GLPCI_DREVID_DEFAULT_REVID
);
5244 val
= i40iw_rd32(dev
->hw
, I40E_GLPCI_LBARCTRL
);
5245 db_size
= (u8
)RS_32(val
, I40E_GLPCI_LBARCTRL_PE_DB_SIZE
);
5246 if ((db_size
!= I40IW_PE_DB_SIZE_4M
) &&
5247 (db_size
!= I40IW_PE_DB_SIZE_8M
)) {
5248 i40iw_debug(dev
, I40IW_DEBUG_DEV
,
5249 "%s: PE doorbell is not enabled in CSR val 0x%x\n",
5251 ret_code
= I40IW_ERR_PE_DOORBELL_NOT_ENABLED
;
5254 dev
->db_addr
= dev
->hw
->hw_addr
+ I40IW_DB_ADDR_OFFSET
;
5255 dev
->vchnl_if
.vchnl_recv
= i40iw_vchnl_recv_pf
;
5257 dev
->db_addr
= dev
->hw
->hw_addr
+ I40IW_VF_DB_ADDR_OFFSET
;
5260 dev
->cqp_ops
= &iw_cqp_ops
;
5261 dev
->ccq_ops
= &iw_ccq_ops
;
5262 dev
->ceq_ops
= &iw_ceq_ops
;
5263 dev
->aeq_ops
= &iw_aeq_ops
;
5264 dev
->cqp_misc_ops
= &iw_cqp_misc_ops
;
5265 dev
->iw_pd_ops
= &iw_pd_ops
;
5266 dev
->iw_priv_qp_ops
= &iw_priv_qp_ops
;
5267 dev
->iw_priv_cq_ops
= &iw_priv_cq_ops
;
5268 dev
->mr_ops
= &iw_mr_ops
;
5269 dev
->hmc_ops
= &iw_hmc_ops
;
5270 dev
->vchnl_if
.vchnl_send
= info
->vchnl_send
;
5271 if (dev
->vchnl_if
.vchnl_send
)
5272 dev
->vchnl_up
= true;
5274 dev
->vchnl_up
= false;
5276 dev
->vchnl_if
.vchnl_recv
= i40iw_vchnl_recv_vf
;
5277 ret_code
= i40iw_vchnl_vf_get_ver(dev
, &vchnl_ver
);
5279 i40iw_debug(dev
, I40IW_DEBUG_DEV
,
5280 "%s: Get Channel version rc = 0x%0x, version is %u\n",
5281 __func__
, ret_code
, vchnl_ver
);
5282 ret_code
= i40iw_vchnl_vf_get_hmc_fcn(dev
, &hmc_fcn
);
5284 i40iw_debug(dev
, I40IW_DEBUG_DEV
,
5285 "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5286 __func__
, ret_code
, hmc_fcn
);
5287 dev
->hmc_fn_id
= (u8
)hmc_fcn
;
5291 dev
->iw_vf_cqp_ops
= &iw_vf_cqp_ops
;