// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 */
6 #include <linux/sort.h>
7 #include <linux/string.h>
11 #include "cxgb4_cudbg.h"
13 #include "cudbg_lib_common.h"
14 #include "cudbg_entity.h"
15 #include "cudbg_lib.h"
16 #include "cudbg_zlib.h"
18 static int cudbg_do_compression(struct cudbg_init
*pdbg_init
,
19 struct cudbg_buffer
*pin_buff
,
20 struct cudbg_buffer
*dbg_buff
)
22 struct cudbg_buffer temp_in_buff
= { 0 };
23 int bytes_left
, bytes_read
, bytes
;
24 u32 offset
= dbg_buff
->offset
;
27 temp_in_buff
.offset
= pin_buff
->offset
;
28 temp_in_buff
.data
= pin_buff
->data
;
29 temp_in_buff
.size
= pin_buff
->size
;
31 bytes_left
= pin_buff
->size
;
33 while (bytes_left
> 0) {
34 /* Do compression in smaller chunks */
35 bytes
= min_t(unsigned long, bytes_left
,
36 (unsigned long)CUDBG_CHUNK_SIZE
);
37 temp_in_buff
.data
= (char *)pin_buff
->data
+ bytes_read
;
38 temp_in_buff
.size
= bytes
;
39 rc
= cudbg_compress_buff(pdbg_init
, &temp_in_buff
, dbg_buff
);
46 pin_buff
->size
= dbg_buff
->offset
- offset
;
50 static int cudbg_write_and_release_buff(struct cudbg_init
*pdbg_init
,
51 struct cudbg_buffer
*pin_buff
,
52 struct cudbg_buffer
*dbg_buff
)
56 if (pdbg_init
->compress_type
== CUDBG_COMPRESSION_NONE
) {
57 cudbg_update_buff(pin_buff
, dbg_buff
);
59 rc
= cudbg_do_compression(pdbg_init
, pin_buff
, dbg_buff
);
65 cudbg_put_buff(pdbg_init
, pin_buff
);
69 static int is_fw_attached(struct cudbg_init
*pdbg_init
)
71 struct adapter
*padap
= pdbg_init
->adap
;
73 if (!(padap
->flags
& CXGB4_FW_OK
) || padap
->use_bd
)
79 /* This function will add additional padding bytes into debug_buffer to make it
82 void cudbg_align_debug_buffer(struct cudbg_buffer
*dbg_buff
,
83 struct cudbg_entity_hdr
*entity_hdr
)
88 remain
= (dbg_buff
->offset
- entity_hdr
->start_offset
) % 4;
91 memcpy(((u8
*)dbg_buff
->data
) + dbg_buff
->offset
, &zero_buf
,
93 dbg_buff
->offset
+= padding
;
94 entity_hdr
->num_pad
= padding
;
96 entity_hdr
->size
= dbg_buff
->offset
- entity_hdr
->start_offset
;
99 struct cudbg_entity_hdr
*cudbg_get_entity_hdr(void *outbuf
, int i
)
101 struct cudbg_hdr
*cudbg_hdr
= (struct cudbg_hdr
*)outbuf
;
103 return (struct cudbg_entity_hdr
*)
104 ((char *)outbuf
+ cudbg_hdr
->hdr_len
+
105 (sizeof(struct cudbg_entity_hdr
) * (i
- 1)));
108 static int cudbg_read_vpd_reg(struct adapter
*padap
, u32 addr
, u32 len
,
113 vaddr
= t4_eeprom_ptov(addr
, padap
->pf
, EEPROMPFSIZE
);
117 rc
= pci_read_vpd(padap
->pdev
, vaddr
, len
, dest
);
124 static int cudbg_mem_desc_cmp(const void *a
, const void *b
)
126 return ((const struct cudbg_mem_desc
*)a
)->base
-
127 ((const struct cudbg_mem_desc
*)b
)->base
;
130 int cudbg_fill_meminfo(struct adapter
*padap
,
131 struct cudbg_meminfo
*meminfo_buff
)
133 struct cudbg_mem_desc
*md
;
134 u32 lo
, hi
, used
, alloc
;
137 memset(meminfo_buff
->avail
, 0,
138 ARRAY_SIZE(meminfo_buff
->avail
) *
139 sizeof(struct cudbg_mem_desc
));
140 memset(meminfo_buff
->mem
, 0,
141 (ARRAY_SIZE(cudbg_region
) + 3) * sizeof(struct cudbg_mem_desc
));
142 md
= meminfo_buff
->mem
;
144 for (i
= 0; i
< ARRAY_SIZE(meminfo_buff
->mem
); i
++) {
145 meminfo_buff
->mem
[i
].limit
= 0;
146 meminfo_buff
->mem
[i
].idx
= i
;
149 /* Find and sort the populated memory ranges */
151 lo
= t4_read_reg(padap
, MA_TARGET_MEM_ENABLE_A
);
152 if (lo
& EDRAM0_ENABLE_F
) {
153 hi
= t4_read_reg(padap
, MA_EDRAM0_BAR_A
);
154 meminfo_buff
->avail
[i
].base
=
155 cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi
));
156 meminfo_buff
->avail
[i
].limit
=
157 meminfo_buff
->avail
[i
].base
+
158 cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi
));
159 meminfo_buff
->avail
[i
].idx
= 0;
163 if (lo
& EDRAM1_ENABLE_F
) {
164 hi
= t4_read_reg(padap
, MA_EDRAM1_BAR_A
);
165 meminfo_buff
->avail
[i
].base
=
166 cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi
));
167 meminfo_buff
->avail
[i
].limit
=
168 meminfo_buff
->avail
[i
].base
+
169 cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi
));
170 meminfo_buff
->avail
[i
].idx
= 1;
174 if (is_t5(padap
->params
.chip
)) {
175 if (lo
& EXT_MEM0_ENABLE_F
) {
176 hi
= t4_read_reg(padap
, MA_EXT_MEMORY0_BAR_A
);
177 meminfo_buff
->avail
[i
].base
=
178 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi
));
179 meminfo_buff
->avail
[i
].limit
=
180 meminfo_buff
->avail
[i
].base
+
181 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi
));
182 meminfo_buff
->avail
[i
].idx
= 3;
186 if (lo
& EXT_MEM1_ENABLE_F
) {
187 hi
= t4_read_reg(padap
, MA_EXT_MEMORY1_BAR_A
);
188 meminfo_buff
->avail
[i
].base
=
189 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi
));
190 meminfo_buff
->avail
[i
].limit
=
191 meminfo_buff
->avail
[i
].base
+
192 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi
));
193 meminfo_buff
->avail
[i
].idx
= 4;
197 if (lo
& EXT_MEM_ENABLE_F
) {
198 hi
= t4_read_reg(padap
, MA_EXT_MEMORY_BAR_A
);
199 meminfo_buff
->avail
[i
].base
=
200 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi
));
201 meminfo_buff
->avail
[i
].limit
=
202 meminfo_buff
->avail
[i
].base
+
203 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi
));
204 meminfo_buff
->avail
[i
].idx
= 2;
208 if (lo
& HMA_MUX_F
) {
209 hi
= t4_read_reg(padap
, MA_EXT_MEMORY1_BAR_A
);
210 meminfo_buff
->avail
[i
].base
=
211 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi
));
212 meminfo_buff
->avail
[i
].limit
=
213 meminfo_buff
->avail
[i
].base
+
214 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi
));
215 meminfo_buff
->avail
[i
].idx
= 5;
220 if (!i
) /* no memory available */
221 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
223 meminfo_buff
->avail_c
= i
;
224 sort(meminfo_buff
->avail
, i
, sizeof(struct cudbg_mem_desc
),
225 cudbg_mem_desc_cmp
, NULL
);
226 (md
++)->base
= t4_read_reg(padap
, SGE_DBQ_CTXT_BADDR_A
);
227 (md
++)->base
= t4_read_reg(padap
, SGE_IMSG_CTXT_BADDR_A
);
228 (md
++)->base
= t4_read_reg(padap
, SGE_FLM_CACHE_BADDR_A
);
229 (md
++)->base
= t4_read_reg(padap
, TP_CMM_TCB_BASE_A
);
230 (md
++)->base
= t4_read_reg(padap
, TP_CMM_MM_BASE_A
);
231 (md
++)->base
= t4_read_reg(padap
, TP_CMM_TIMER_BASE_A
);
232 (md
++)->base
= t4_read_reg(padap
, TP_CMM_MM_RX_FLST_BASE_A
);
233 (md
++)->base
= t4_read_reg(padap
, TP_CMM_MM_TX_FLST_BASE_A
);
234 (md
++)->base
= t4_read_reg(padap
, TP_CMM_MM_PS_FLST_BASE_A
);
236 /* the next few have explicit upper bounds */
237 md
->base
= t4_read_reg(padap
, TP_PMM_TX_BASE_A
);
238 md
->limit
= md
->base
- 1 +
239 t4_read_reg(padap
, TP_PMM_TX_PAGE_SIZE_A
) *
240 PMTXMAXPAGE_G(t4_read_reg(padap
, TP_PMM_TX_MAX_PAGE_A
));
243 md
->base
= t4_read_reg(padap
, TP_PMM_RX_BASE_A
);
244 md
->limit
= md
->base
- 1 +
245 t4_read_reg(padap
, TP_PMM_RX_PAGE_SIZE_A
) *
246 PMRXMAXPAGE_G(t4_read_reg(padap
, TP_PMM_RX_MAX_PAGE_A
));
249 if (t4_read_reg(padap
, LE_DB_CONFIG_A
) & HASHEN_F
) {
250 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) <= CHELSIO_T5
) {
251 hi
= t4_read_reg(padap
, LE_DB_TID_HASHBASE_A
) / 4;
252 md
->base
= t4_read_reg(padap
, LE_DB_HASH_TID_BASE_A
);
254 hi
= t4_read_reg(padap
, LE_DB_HASH_TID_BASE_A
);
255 md
->base
= t4_read_reg(padap
,
256 LE_DB_HASH_TBL_BASE_ADDR_A
);
261 md
->idx
= ARRAY_SIZE(cudbg_region
); /* hide it */
265 #define ulp_region(reg) do { \
266 md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
267 (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
270 ulp_region(RX_ISCSI
);
275 ulp_region(RX_RQUDP
);
280 md
->idx
= ARRAY_SIZE(cudbg_region
);
281 if (!is_t4(padap
->params
.chip
)) {
282 u32 fifo_size
= t4_read_reg(padap
, SGE_DBVFIFO_SIZE_A
);
283 u32 sge_ctrl
= t4_read_reg(padap
, SGE_CONTROL2_A
);
286 if (is_t5(padap
->params
.chip
)) {
287 if (sge_ctrl
& VFIFO_ENABLE_F
)
288 size
= DBVFIFO_SIZE_G(fifo_size
);
290 size
= T6_DBVFIFO_SIZE_G(fifo_size
);
294 md
->base
= BASEADDR_G(t4_read_reg(padap
,
295 SGE_DBVFIFO_BADDR_A
));
296 md
->limit
= md
->base
+ (size
<< 2) - 1;
302 md
->base
= t4_read_reg(padap
, ULP_RX_CTX_BASE_A
);
305 md
->base
= t4_read_reg(padap
, ULP_TX_ERR_TABLE_BASE_A
);
309 md
->base
= padap
->vres
.ocq
.start
;
310 if (padap
->vres
.ocq
.size
)
311 md
->limit
= md
->base
+ padap
->vres
.ocq
.size
- 1;
313 md
->idx
= ARRAY_SIZE(cudbg_region
); /* hide it */
316 /* add any address-space holes, there can be up to 3 */
317 for (n
= 0; n
< i
- 1; n
++)
318 if (meminfo_buff
->avail
[n
].limit
<
319 meminfo_buff
->avail
[n
+ 1].base
)
320 (md
++)->base
= meminfo_buff
->avail
[n
].limit
;
322 if (meminfo_buff
->avail
[n
].limit
)
323 (md
++)->base
= meminfo_buff
->avail
[n
].limit
;
325 n
= md
- meminfo_buff
->mem
;
326 meminfo_buff
->mem_c
= n
;
328 sort(meminfo_buff
->mem
, n
, sizeof(struct cudbg_mem_desc
),
329 cudbg_mem_desc_cmp
, NULL
);
331 lo
= t4_read_reg(padap
, CIM_SDRAM_BASE_ADDR_A
);
332 hi
= t4_read_reg(padap
, CIM_SDRAM_ADDR_SIZE_A
) + lo
- 1;
333 meminfo_buff
->up_ram_lo
= lo
;
334 meminfo_buff
->up_ram_hi
= hi
;
336 lo
= t4_read_reg(padap
, CIM_EXTMEM2_BASE_ADDR_A
);
337 hi
= t4_read_reg(padap
, CIM_EXTMEM2_ADDR_SIZE_A
) + lo
- 1;
338 meminfo_buff
->up_extmem2_lo
= lo
;
339 meminfo_buff
->up_extmem2_hi
= hi
;
341 lo
= t4_read_reg(padap
, TP_PMM_RX_MAX_PAGE_A
);
342 for (i
= 0, meminfo_buff
->free_rx_cnt
= 0; i
< 2; i
++)
343 meminfo_buff
->free_rx_cnt
+=
344 FREERXPAGECOUNT_G(t4_read_reg(padap
,
345 TP_FLM_FREE_RX_CNT_A
));
347 meminfo_buff
->rx_pages_data
[0] = PMRXMAXPAGE_G(lo
);
348 meminfo_buff
->rx_pages_data
[1] =
349 t4_read_reg(padap
, TP_PMM_RX_PAGE_SIZE_A
) >> 10;
350 meminfo_buff
->rx_pages_data
[2] = (lo
& PMRXNUMCHN_F
) ? 2 : 1;
352 lo
= t4_read_reg(padap
, TP_PMM_TX_MAX_PAGE_A
);
353 hi
= t4_read_reg(padap
, TP_PMM_TX_PAGE_SIZE_A
);
354 for (i
= 0, meminfo_buff
->free_tx_cnt
= 0; i
< 4; i
++)
355 meminfo_buff
->free_tx_cnt
+=
356 FREETXPAGECOUNT_G(t4_read_reg(padap
,
357 TP_FLM_FREE_TX_CNT_A
));
359 meminfo_buff
->tx_pages_data
[0] = PMTXMAXPAGE_G(lo
);
360 meminfo_buff
->tx_pages_data
[1] =
361 hi
>= (1 << 20) ? (hi
>> 20) : (hi
>> 10);
362 meminfo_buff
->tx_pages_data
[2] =
363 hi
>= (1 << 20) ? 'M' : 'K';
364 meminfo_buff
->tx_pages_data
[3] = 1 << PMTXNUMCHN_G(lo
);
366 meminfo_buff
->p_structs
= t4_read_reg(padap
, TP_CMM_MM_MAX_PSTRUCT_A
);
367 meminfo_buff
->p_structs_free_cnt
=
368 FREEPSTRUCTCOUNT_G(t4_read_reg(padap
, TP_FLM_FREE_PS_CNT_A
));
370 for (i
= 0; i
< 4; i
++) {
371 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) > CHELSIO_T5
)
372 lo
= t4_read_reg(padap
,
373 MPS_RX_MAC_BG_PG_CNT0_A
+ i
* 4);
375 lo
= t4_read_reg(padap
, MPS_RX_PG_RSV0_A
+ i
* 4);
376 if (is_t5(padap
->params
.chip
)) {
377 used
= T5_USED_G(lo
);
378 alloc
= T5_ALLOC_G(lo
);
383 meminfo_buff
->port_used
[i
] = used
;
384 meminfo_buff
->port_alloc
[i
] = alloc
;
387 for (i
= 0; i
< padap
->params
.arch
.nchan
; i
++) {
388 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) > CHELSIO_T5
)
389 lo
= t4_read_reg(padap
,
390 MPS_RX_LPBK_BG_PG_CNT0_A
+ i
* 4);
392 lo
= t4_read_reg(padap
, MPS_RX_PG_RSV4_A
+ i
* 4);
393 if (is_t5(padap
->params
.chip
)) {
394 used
= T5_USED_G(lo
);
395 alloc
= T5_ALLOC_G(lo
);
400 meminfo_buff
->loopback_used
[i
] = used
;
401 meminfo_buff
->loopback_alloc
[i
] = alloc
;
407 int cudbg_collect_reg_dump(struct cudbg_init
*pdbg_init
,
408 struct cudbg_buffer
*dbg_buff
,
409 struct cudbg_error
*cudbg_err
)
411 struct adapter
*padap
= pdbg_init
->adap
;
412 struct cudbg_buffer temp_buff
= { 0 };
416 if (is_t4(padap
->params
.chip
))
417 buf_size
= T4_REGMAP_SIZE
;
418 else if (is_t5(padap
->params
.chip
) || is_t6(padap
->params
.chip
))
419 buf_size
= T5_REGMAP_SIZE
;
421 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, buf_size
, &temp_buff
);
424 t4_get_regs(padap
, (void *)temp_buff
.data
, temp_buff
.size
);
425 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
428 int cudbg_collect_fw_devlog(struct cudbg_init
*pdbg_init
,
429 struct cudbg_buffer
*dbg_buff
,
430 struct cudbg_error
*cudbg_err
)
432 struct adapter
*padap
= pdbg_init
->adap
;
433 struct cudbg_buffer temp_buff
= { 0 };
434 struct devlog_params
*dparams
;
437 rc
= t4_init_devlog_params(padap
);
439 cudbg_err
->sys_err
= rc
;
443 dparams
= &padap
->params
.devlog
;
444 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, dparams
->size
, &temp_buff
);
448 /* Collect FW devlog */
449 if (dparams
->start
!= 0) {
450 spin_lock(&padap
->win0_lock
);
451 rc
= t4_memory_rw(padap
, padap
->params
.drv_memwin
,
452 dparams
->memtype
, dparams
->start
,
454 (__be32
*)(char *)temp_buff
.data
,
456 spin_unlock(&padap
->win0_lock
);
458 cudbg_err
->sys_err
= rc
;
459 cudbg_put_buff(pdbg_init
, &temp_buff
);
463 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
466 int cudbg_collect_cim_la(struct cudbg_init
*pdbg_init
,
467 struct cudbg_buffer
*dbg_buff
,
468 struct cudbg_error
*cudbg_err
)
470 struct adapter
*padap
= pdbg_init
->adap
;
471 struct cudbg_buffer temp_buff
= { 0 };
475 if (is_t6(padap
->params
.chip
)) {
476 size
= padap
->params
.cim_la_size
/ 10 + 1;
477 size
*= 10 * sizeof(u32
);
479 size
= padap
->params
.cim_la_size
/ 8;
480 size
*= 8 * sizeof(u32
);
484 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
488 rc
= t4_cim_read(padap
, UP_UP_DBG_LA_CFG_A
, 1, &cfg
);
490 cudbg_err
->sys_err
= rc
;
491 cudbg_put_buff(pdbg_init
, &temp_buff
);
495 memcpy((char *)temp_buff
.data
, &cfg
, sizeof(cfg
));
496 rc
= t4_cim_read_la(padap
,
497 (u32
*)((char *)temp_buff
.data
+ sizeof(cfg
)),
500 cudbg_err
->sys_err
= rc
;
501 cudbg_put_buff(pdbg_init
, &temp_buff
);
504 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
507 int cudbg_collect_cim_ma_la(struct cudbg_init
*pdbg_init
,
508 struct cudbg_buffer
*dbg_buff
,
509 struct cudbg_error
*cudbg_err
)
511 struct adapter
*padap
= pdbg_init
->adap
;
512 struct cudbg_buffer temp_buff
= { 0 };
515 size
= 2 * CIM_MALA_SIZE
* 5 * sizeof(u32
);
516 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
520 t4_cim_read_ma_la(padap
,
521 (u32
*)temp_buff
.data
,
522 (u32
*)((char *)temp_buff
.data
+
524 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
527 int cudbg_collect_cim_qcfg(struct cudbg_init
*pdbg_init
,
528 struct cudbg_buffer
*dbg_buff
,
529 struct cudbg_error
*cudbg_err
)
531 struct adapter
*padap
= pdbg_init
->adap
;
532 struct cudbg_buffer temp_buff
= { 0 };
533 struct cudbg_cim_qcfg
*cim_qcfg_data
;
536 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_cim_qcfg
),
541 cim_qcfg_data
= (struct cudbg_cim_qcfg
*)temp_buff
.data
;
542 cim_qcfg_data
->chip
= padap
->params
.chip
;
543 rc
= t4_cim_read(padap
, UP_IBQ_0_RDADDR_A
,
544 ARRAY_SIZE(cim_qcfg_data
->stat
), cim_qcfg_data
->stat
);
546 cudbg_err
->sys_err
= rc
;
547 cudbg_put_buff(pdbg_init
, &temp_buff
);
551 rc
= t4_cim_read(padap
, UP_OBQ_0_REALADDR_A
,
552 ARRAY_SIZE(cim_qcfg_data
->obq_wr
),
553 cim_qcfg_data
->obq_wr
);
555 cudbg_err
->sys_err
= rc
;
556 cudbg_put_buff(pdbg_init
, &temp_buff
);
560 t4_read_cimq_cfg(padap
, cim_qcfg_data
->base
, cim_qcfg_data
->size
,
561 cim_qcfg_data
->thres
);
562 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
565 static int cudbg_read_cim_ibq(struct cudbg_init
*pdbg_init
,
566 struct cudbg_buffer
*dbg_buff
,
567 struct cudbg_error
*cudbg_err
, int qid
)
569 struct adapter
*padap
= pdbg_init
->adap
;
570 struct cudbg_buffer temp_buff
= { 0 };
571 int no_of_read_words
, rc
= 0;
574 /* collect CIM IBQ */
575 qsize
= CIM_IBQ_SIZE
* 4 * sizeof(u32
);
576 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, qsize
, &temp_buff
);
580 /* t4_read_cim_ibq will return no. of read words or error */
581 no_of_read_words
= t4_read_cim_ibq(padap
, qid
,
582 (u32
*)temp_buff
.data
, qsize
);
583 /* no_of_read_words is less than or equal to 0 means error */
584 if (no_of_read_words
<= 0) {
585 if (!no_of_read_words
)
586 rc
= CUDBG_SYSTEM_ERROR
;
588 rc
= no_of_read_words
;
589 cudbg_err
->sys_err
= rc
;
590 cudbg_put_buff(pdbg_init
, &temp_buff
);
593 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
/* Collect CIM IBQ 0 (TP0) */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
/* Collect CIM IBQ 1 (TP1) */
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
/* Collect CIM IBQ 2 (ULP) */
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
/* Collect CIM IBQ 3 (SGE0) */
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
/* Collect CIM IBQ 4 (SGE1) */
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
/* Collect CIM IBQ 5 (NC-SI) */
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
638 u32
cudbg_cim_obq_size(struct adapter
*padap
, int qid
)
642 t4_write_reg(padap
, CIM_QUEUE_CONFIG_REF_A
, OBQSELECT_F
|
643 QUENUMSELECT_V(qid
));
644 value
= t4_read_reg(padap
, CIM_QUEUE_CONFIG_CTRL_A
);
645 value
= CIMQSIZE_G(value
) * 64; /* size in number of words */
646 return value
* sizeof(u32
);
649 static int cudbg_read_cim_obq(struct cudbg_init
*pdbg_init
,
650 struct cudbg_buffer
*dbg_buff
,
651 struct cudbg_error
*cudbg_err
, int qid
)
653 struct adapter
*padap
= pdbg_init
->adap
;
654 struct cudbg_buffer temp_buff
= { 0 };
655 int no_of_read_words
, rc
= 0;
658 /* collect CIM OBQ */
659 qsize
= cudbg_cim_obq_size(padap
, qid
);
660 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, qsize
, &temp_buff
);
664 /* t4_read_cim_obq will return no. of read words or error */
665 no_of_read_words
= t4_read_cim_obq(padap
, qid
,
666 (u32
*)temp_buff
.data
, qsize
);
667 /* no_of_read_words is less than or equal to 0 means error */
668 if (no_of_read_words
<= 0) {
669 if (!no_of_read_words
)
670 rc
= CUDBG_SYSTEM_ERROR
;
672 rc
= no_of_read_words
;
673 cudbg_err
->sys_err
= rc
;
674 cudbg_put_buff(pdbg_init
, &temp_buff
);
677 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
/* Collect CIM OBQ 0 (ULP0) */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}
/* Collect CIM OBQ 1 (ULP1) */
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}
/* Collect CIM OBQ 2 (ULP2) */
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}
/* Collect CIM OBQ 3 (ULP3) */
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}
/* Collect CIM OBQ 4 (SGE) */
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}
/* Collect CIM OBQ 5 (NC-SI) */
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}
/* Collect CIM OBQ 6 (SGE RX queue 0) */
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}
/* Collect CIM OBQ 7 (SGE RX queue 1) */
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
736 static int cudbg_meminfo_get_mem_index(struct adapter
*padap
,
737 struct cudbg_meminfo
*mem_info
,
738 u8 mem_type
, u8
*idx
)
750 /* Some T5 cards have both MC0 and MC1. */
751 flag
= is_t5(padap
->params
.chip
) ? MC0_FLAG
: MC_FLAG
;
760 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
763 for (i
= 0; i
< mem_info
->avail_c
; i
++) {
764 if (mem_info
->avail
[i
].idx
== flag
) {
770 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
773 /* Fetch the @region_name's start and end from @meminfo. */
774 static int cudbg_get_mem_region(struct adapter
*padap
,
775 struct cudbg_meminfo
*meminfo
,
776 u8 mem_type
, const char *region_name
,
777 struct cudbg_mem_desc
*mem_desc
)
783 rc
= cudbg_meminfo_get_mem_index(padap
, meminfo
, mem_type
, &mc
);
787 i
= match_string(cudbg_region
, ARRAY_SIZE(cudbg_region
), region_name
);
792 for (i
= 0; i
< meminfo
->mem_c
; i
++) {
793 if (meminfo
->mem
[i
].idx
>= ARRAY_SIZE(cudbg_region
))
794 continue; /* Skip holes */
796 if (!(meminfo
->mem
[i
].limit
))
797 meminfo
->mem
[i
].limit
=
798 i
< meminfo
->mem_c
- 1 ?
799 meminfo
->mem
[i
+ 1].base
- 1 : ~0;
801 if (meminfo
->mem
[i
].idx
== idx
) {
802 /* Check if the region exists in @mem_type memory */
803 if (meminfo
->mem
[i
].base
< meminfo
->avail
[mc
].base
&&
804 meminfo
->mem
[i
].limit
< meminfo
->avail
[mc
].base
)
807 if (meminfo
->mem
[i
].base
> meminfo
->avail
[mc
].limit
)
810 memcpy(mem_desc
, &meminfo
->mem
[i
],
811 sizeof(struct cudbg_mem_desc
));
822 /* Fetch and update the start and end of the requested memory region w.r.t 0
823 * in the corresponding EDC/MC/HMA.
825 static int cudbg_get_mem_relative(struct adapter
*padap
,
826 struct cudbg_meminfo
*meminfo
,
827 u8 mem_type
, u32
*out_base
, u32
*out_end
)
832 rc
= cudbg_meminfo_get_mem_index(padap
, meminfo
, mem_type
, &mc_idx
);
836 if (*out_base
< meminfo
->avail
[mc_idx
].base
)
839 *out_base
-= meminfo
->avail
[mc_idx
].base
;
841 if (*out_end
> meminfo
->avail
[mc_idx
].limit
)
842 *out_end
= meminfo
->avail
[mc_idx
].limit
;
844 *out_end
-= meminfo
->avail
[mc_idx
].base
;
849 /* Get TX and RX Payload region */
850 static int cudbg_get_payload_range(struct adapter
*padap
, u8 mem_type
,
851 const char *region_name
,
852 struct cudbg_region_info
*payload
)
854 struct cudbg_mem_desc mem_desc
= { 0 };
855 struct cudbg_meminfo meminfo
;
858 rc
= cudbg_fill_meminfo(padap
, &meminfo
);
862 rc
= cudbg_get_mem_region(padap
, &meminfo
, mem_type
, region_name
,
865 payload
->exist
= false;
869 payload
->exist
= true;
870 payload
->start
= mem_desc
.base
;
871 payload
->end
= mem_desc
.limit
;
873 return cudbg_get_mem_relative(padap
, &meminfo
, mem_type
,
874 &payload
->start
, &payload
->end
);
877 static int cudbg_memory_read(struct cudbg_init
*pdbg_init
, int win
,
878 int mtype
, u32 addr
, u32 len
, void *hbuf
)
880 u32 win_pf
, memoffset
, mem_aperture
, mem_base
;
881 struct adapter
*adap
= pdbg_init
->adap
;
882 u32 pos
, offset
, resid
;
887 /* Argument sanity checks ...
889 if (addr
& 0x3 || (uintptr_t)hbuf
& 0x3)
894 /* Try to do 64-bit reads. Residual will be handled later. */
898 ret
= t4_memory_rw_init(adap
, win
, mtype
, &memoffset
, &mem_base
,
903 addr
= addr
+ memoffset
;
904 win_pf
= is_t4(adap
->params
.chip
) ? 0 : PFNUM_V(adap
->pf
);
906 pos
= addr
& ~(mem_aperture
- 1);
909 /* Set up initial PCI-E Memory Window to cover the start of our
912 t4_memory_update_win(adap
, win
, pos
| win_pf
);
914 /* Transfer data from the adapter */
916 *buf
++ = le64_to_cpu((__force __le64
)
917 t4_read_reg64(adap
, mem_base
+ offset
));
918 offset
+= sizeof(u64
);
921 /* If we've reached the end of our current window aperture,
922 * move the PCI-E Memory Window on to the next.
924 if (offset
== mem_aperture
) {
927 t4_memory_update_win(adap
, win
, pos
| win_pf
);
931 res_buf
= (u32
*)buf
;
932 /* Read residual in 32-bit multiples */
933 while (resid
> sizeof(u32
)) {
934 *res_buf
++ = le32_to_cpu((__force __le32
)
935 t4_read_reg(adap
, mem_base
+ offset
));
936 offset
+= sizeof(u32
);
937 resid
-= sizeof(u32
);
939 /* If we've reached the end of our current window aperture,
940 * move the PCI-E Memory Window on to the next.
942 if (offset
== mem_aperture
) {
945 t4_memory_update_win(adap
, win
, pos
| win_pf
);
949 /* Transfer residual < 32-bits */
951 t4_memory_rw_residual(adap
, resid
, mem_base
+ offset
,
952 (u8
*)res_buf
, T4_MEMORY_READ
);
957 #define CUDBG_YIELD_ITERATION 256
959 static int cudbg_read_fw_mem(struct cudbg_init
*pdbg_init
,
960 struct cudbg_buffer
*dbg_buff
, u8 mem_type
,
961 unsigned long tot_len
,
962 struct cudbg_error
*cudbg_err
)
964 static const char * const region_name
[] = { "Tx payload:",
966 unsigned long bytes
, bytes_left
, bytes_read
= 0;
967 struct adapter
*padap
= pdbg_init
->adap
;
968 struct cudbg_buffer temp_buff
= { 0 };
969 struct cudbg_region_info payload
[2];
974 /* Get TX/RX Payload region range if they exist */
975 memset(payload
, 0, sizeof(payload
));
976 for (i
= 0; i
< ARRAY_SIZE(region_name
); i
++) {
977 rc
= cudbg_get_payload_range(padap
, mem_type
, region_name
[i
],
982 if (payload
[i
].exist
) {
983 /* Align start and end to avoid wrap around */
984 payload
[i
].start
= roundup(payload
[i
].start
,
986 payload
[i
].end
= rounddown(payload
[i
].end
,
991 bytes_left
= tot_len
;
992 while (bytes_left
> 0) {
993 /* As MC size is huge and read through PIO access, this
994 * loop will hold cpu for a longer time. OS may think that
995 * the process is hanged and will generate CPU stall traces.
996 * So yield the cpu regularly.
999 if (!(yield_count
% CUDBG_YIELD_ITERATION
))
1002 bytes
= min_t(unsigned long, bytes_left
,
1003 (unsigned long)CUDBG_CHUNK_SIZE
);
1004 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, bytes
, &temp_buff
);
1008 for (i
= 0; i
< ARRAY_SIZE(payload
); i
++)
1009 if (payload
[i
].exist
&&
1010 bytes_read
>= payload
[i
].start
&&
1011 bytes_read
+ bytes
<= payload
[i
].end
)
1012 /* TX and RX Payload regions can't overlap */
1015 spin_lock(&padap
->win0_lock
);
1016 rc
= cudbg_memory_read(pdbg_init
, MEMWIN_NIC
, mem_type
,
1017 bytes_read
, bytes
, temp_buff
.data
);
1018 spin_unlock(&padap
->win0_lock
);
1020 cudbg_err
->sys_err
= rc
;
1021 cudbg_put_buff(pdbg_init
, &temp_buff
);
1026 bytes_left
-= bytes
;
1027 bytes_read
+= bytes
;
1028 rc
= cudbg_write_and_release_buff(pdbg_init
, &temp_buff
,
1031 cudbg_put_buff(pdbg_init
, &temp_buff
);
1038 static void cudbg_t4_fwcache(struct cudbg_init
*pdbg_init
,
1039 struct cudbg_error
*cudbg_err
)
1041 struct adapter
*padap
= pdbg_init
->adap
;
1044 if (is_fw_attached(pdbg_init
)) {
1045 /* Flush uP dcache before reading edcX/mcX */
1046 rc
= t4_fwcache(padap
, FW_PARAM_DEV_FWCACHE_FLUSH
);
1048 cudbg_err
->sys_warn
= rc
;
1052 static int cudbg_mem_region_size(struct cudbg_init
*pdbg_init
,
1053 struct cudbg_error
*cudbg_err
,
1054 u8 mem_type
, unsigned long *region_size
)
1056 struct adapter
*padap
= pdbg_init
->adap
;
1057 struct cudbg_meminfo mem_info
;
1061 memset(&mem_info
, 0, sizeof(struct cudbg_meminfo
));
1062 rc
= cudbg_fill_meminfo(padap
, &mem_info
);
1064 cudbg_err
->sys_err
= rc
;
1068 cudbg_t4_fwcache(pdbg_init
, cudbg_err
);
1069 rc
= cudbg_meminfo_get_mem_index(padap
, &mem_info
, mem_type
, &mc_idx
);
1071 cudbg_err
->sys_err
= rc
;
1076 *region_size
= mem_info
.avail
[mc_idx
].limit
-
1077 mem_info
.avail
[mc_idx
].base
;
1082 static int cudbg_collect_mem_region(struct cudbg_init
*pdbg_init
,
1083 struct cudbg_buffer
*dbg_buff
,
1084 struct cudbg_error
*cudbg_err
,
1087 unsigned long size
= 0;
1090 rc
= cudbg_mem_region_size(pdbg_init
, cudbg_err
, mem_type
, &size
);
1094 return cudbg_read_fw_mem(pdbg_init
, dbg_buff
, mem_type
, size
,
1098 int cudbg_collect_edc0_meminfo(struct cudbg_init
*pdbg_init
,
1099 struct cudbg_buffer
*dbg_buff
,
1100 struct cudbg_error
*cudbg_err
)
1102 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1106 int cudbg_collect_edc1_meminfo(struct cudbg_init
*pdbg_init
,
1107 struct cudbg_buffer
*dbg_buff
,
1108 struct cudbg_error
*cudbg_err
)
1110 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1114 int cudbg_collect_mc0_meminfo(struct cudbg_init
*pdbg_init
,
1115 struct cudbg_buffer
*dbg_buff
,
1116 struct cudbg_error
*cudbg_err
)
1118 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1122 int cudbg_collect_mc1_meminfo(struct cudbg_init
*pdbg_init
,
1123 struct cudbg_buffer
*dbg_buff
,
1124 struct cudbg_error
*cudbg_err
)
1126 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1130 int cudbg_collect_hma_meminfo(struct cudbg_init
*pdbg_init
,
1131 struct cudbg_buffer
*dbg_buff
,
1132 struct cudbg_error
*cudbg_err
)
1134 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1138 int cudbg_collect_rss(struct cudbg_init
*pdbg_init
,
1139 struct cudbg_buffer
*dbg_buff
,
1140 struct cudbg_error
*cudbg_err
)
1142 struct adapter
*padap
= pdbg_init
->adap
;
1143 struct cudbg_buffer temp_buff
= { 0 };
1146 nentries
= t4_chip_rss_size(padap
);
1147 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, nentries
* sizeof(u16
),
1152 rc
= t4_read_rss(padap
, (u16
*)temp_buff
.data
);
1154 cudbg_err
->sys_err
= rc
;
1155 cudbg_put_buff(pdbg_init
, &temp_buff
);
1158 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1161 int cudbg_collect_rss_vf_config(struct cudbg_init
*pdbg_init
,
1162 struct cudbg_buffer
*dbg_buff
,
1163 struct cudbg_error
*cudbg_err
)
1165 struct adapter
*padap
= pdbg_init
->adap
;
1166 struct cudbg_buffer temp_buff
= { 0 };
1167 struct cudbg_rss_vf_conf
*vfconf
;
1168 int vf
, rc
, vf_count
;
1170 vf_count
= padap
->params
.arch
.vfcount
;
1171 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1172 vf_count
* sizeof(struct cudbg_rss_vf_conf
),
1177 vfconf
= (struct cudbg_rss_vf_conf
*)temp_buff
.data
;
1178 for (vf
= 0; vf
< vf_count
; vf
++)
1179 t4_read_rss_vf_config(padap
, vf
, &vfconf
[vf
].rss_vf_vfl
,
1180 &vfconf
[vf
].rss_vf_vfh
, true);
1181 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1184 int cudbg_collect_path_mtu(struct cudbg_init
*pdbg_init
,
1185 struct cudbg_buffer
*dbg_buff
,
1186 struct cudbg_error
*cudbg_err
)
1188 struct adapter
*padap
= pdbg_init
->adap
;
1189 struct cudbg_buffer temp_buff
= { 0 };
1192 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, NMTUS
* sizeof(u16
),
1197 t4_read_mtu_tbl(padap
, (u16
*)temp_buff
.data
, NULL
);
1198 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1201 int cudbg_collect_pm_stats(struct cudbg_init
*pdbg_init
,
1202 struct cudbg_buffer
*dbg_buff
,
1203 struct cudbg_error
*cudbg_err
)
1205 struct adapter
*padap
= pdbg_init
->adap
;
1206 struct cudbg_buffer temp_buff
= { 0 };
1207 struct cudbg_pm_stats
*pm_stats_buff
;
1210 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_pm_stats
),
1215 pm_stats_buff
= (struct cudbg_pm_stats
*)temp_buff
.data
;
1216 t4_pmtx_get_stats(padap
, pm_stats_buff
->tx_cnt
, pm_stats_buff
->tx_cyc
);
1217 t4_pmrx_get_stats(padap
, pm_stats_buff
->rx_cnt
, pm_stats_buff
->rx_cyc
);
1218 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1221 int cudbg_collect_hw_sched(struct cudbg_init
*pdbg_init
,
1222 struct cudbg_buffer
*dbg_buff
,
1223 struct cudbg_error
*cudbg_err
)
1225 struct adapter
*padap
= pdbg_init
->adap
;
1226 struct cudbg_buffer temp_buff
= { 0 };
1227 struct cudbg_hw_sched
*hw_sched_buff
;
1230 if (!padap
->params
.vpd
.cclk
)
1231 return CUDBG_STATUS_CCLK_NOT_DEFINED
;
1233 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_hw_sched
),
1239 hw_sched_buff
= (struct cudbg_hw_sched
*)temp_buff
.data
;
1240 hw_sched_buff
->map
= t4_read_reg(padap
, TP_TX_MOD_QUEUE_REQ_MAP_A
);
1241 hw_sched_buff
->mode
= TIMERMODE_G(t4_read_reg(padap
, TP_MOD_CONFIG_A
));
1242 t4_read_pace_tbl(padap
, hw_sched_buff
->pace_tab
);
1243 for (i
= 0; i
< NTX_SCHED
; ++i
)
1244 t4_get_tx_sched(padap
, i
, &hw_sched_buff
->kbps
[i
],
1245 &hw_sched_buff
->ipg
[i
], true);
1246 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1249 int cudbg_collect_tp_indirect(struct cudbg_init
*pdbg_init
,
1250 struct cudbg_buffer
*dbg_buff
,
1251 struct cudbg_error
*cudbg_err
)
1253 struct adapter
*padap
= pdbg_init
->adap
;
1254 struct cudbg_buffer temp_buff
= { 0 };
1255 struct ireg_buf
*ch_tp_pio
;
1259 if (is_t5(padap
->params
.chip
))
1260 n
= sizeof(t5_tp_pio_array
) +
1261 sizeof(t5_tp_tm_pio_array
) +
1262 sizeof(t5_tp_mib_index_array
);
1264 n
= sizeof(t6_tp_pio_array
) +
1265 sizeof(t6_tp_tm_pio_array
) +
1266 sizeof(t6_tp_mib_index_array
);
1268 n
= n
/ (IREG_NUM_ELEM
* sizeof(u32
));
1269 size
= sizeof(struct ireg_buf
) * n
;
1270 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1274 ch_tp_pio
= (struct ireg_buf
*)temp_buff
.data
;
1277 if (is_t5(padap
->params
.chip
))
1278 n
= sizeof(t5_tp_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1279 else if (is_t6(padap
->params
.chip
))
1280 n
= sizeof(t6_tp_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1282 for (i
= 0; i
< n
; i
++) {
1283 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1284 u32
*buff
= ch_tp_pio
->outbuf
;
1286 if (is_t5(padap
->params
.chip
)) {
1287 tp_pio
->ireg_addr
= t5_tp_pio_array
[i
][0];
1288 tp_pio
->ireg_data
= t5_tp_pio_array
[i
][1];
1289 tp_pio
->ireg_local_offset
= t5_tp_pio_array
[i
][2];
1290 tp_pio
->ireg_offset_range
= t5_tp_pio_array
[i
][3];
1291 } else if (is_t6(padap
->params
.chip
)) {
1292 tp_pio
->ireg_addr
= t6_tp_pio_array
[i
][0];
1293 tp_pio
->ireg_data
= t6_tp_pio_array
[i
][1];
1294 tp_pio
->ireg_local_offset
= t6_tp_pio_array
[i
][2];
1295 tp_pio
->ireg_offset_range
= t6_tp_pio_array
[i
][3];
1297 t4_tp_pio_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1298 tp_pio
->ireg_local_offset
, true);
1303 if (is_t5(padap
->params
.chip
))
1304 n
= sizeof(t5_tp_tm_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1305 else if (is_t6(padap
->params
.chip
))
1306 n
= sizeof(t6_tp_tm_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1308 for (i
= 0; i
< n
; i
++) {
1309 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1310 u32
*buff
= ch_tp_pio
->outbuf
;
1312 if (is_t5(padap
->params
.chip
)) {
1313 tp_pio
->ireg_addr
= t5_tp_tm_pio_array
[i
][0];
1314 tp_pio
->ireg_data
= t5_tp_tm_pio_array
[i
][1];
1315 tp_pio
->ireg_local_offset
= t5_tp_tm_pio_array
[i
][2];
1316 tp_pio
->ireg_offset_range
= t5_tp_tm_pio_array
[i
][3];
1317 } else if (is_t6(padap
->params
.chip
)) {
1318 tp_pio
->ireg_addr
= t6_tp_tm_pio_array
[i
][0];
1319 tp_pio
->ireg_data
= t6_tp_tm_pio_array
[i
][1];
1320 tp_pio
->ireg_local_offset
= t6_tp_tm_pio_array
[i
][2];
1321 tp_pio
->ireg_offset_range
= t6_tp_tm_pio_array
[i
][3];
1323 t4_tp_tm_pio_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1324 tp_pio
->ireg_local_offset
, true);
1329 if (is_t5(padap
->params
.chip
))
1330 n
= sizeof(t5_tp_mib_index_array
) /
1331 (IREG_NUM_ELEM
* sizeof(u32
));
1332 else if (is_t6(padap
->params
.chip
))
1333 n
= sizeof(t6_tp_mib_index_array
) /
1334 (IREG_NUM_ELEM
* sizeof(u32
));
1336 for (i
= 0; i
< n
; i
++) {
1337 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1338 u32
*buff
= ch_tp_pio
->outbuf
;
1340 if (is_t5(padap
->params
.chip
)) {
1341 tp_pio
->ireg_addr
= t5_tp_mib_index_array
[i
][0];
1342 tp_pio
->ireg_data
= t5_tp_mib_index_array
[i
][1];
1343 tp_pio
->ireg_local_offset
=
1344 t5_tp_mib_index_array
[i
][2];
1345 tp_pio
->ireg_offset_range
=
1346 t5_tp_mib_index_array
[i
][3];
1347 } else if (is_t6(padap
->params
.chip
)) {
1348 tp_pio
->ireg_addr
= t6_tp_mib_index_array
[i
][0];
1349 tp_pio
->ireg_data
= t6_tp_mib_index_array
[i
][1];
1350 tp_pio
->ireg_local_offset
=
1351 t6_tp_mib_index_array
[i
][2];
1352 tp_pio
->ireg_offset_range
=
1353 t6_tp_mib_index_array
[i
][3];
1355 t4_tp_mib_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1356 tp_pio
->ireg_local_offset
, true);
1359 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1362 static void cudbg_read_sge_qbase_indirect_reg(struct adapter
*padap
,
1363 struct sge_qbase_reg_field
*qbase
,
1364 u32 func
, bool is_pf
)
1369 buff
= qbase
->pf_data_value
[func
];
1371 buff
= qbase
->vf_data_value
[func
];
1372 /* In SGE_QBASE_INDEX,
1373 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
1378 t4_write_reg(padap
, qbase
->reg_addr
, func
);
1379 for (i
= 0; i
< SGE_QBASE_DATA_REG_NUM
; i
++, buff
++)
1380 *buff
= t4_read_reg(padap
, qbase
->reg_data
[i
]);
1383 int cudbg_collect_sge_indirect(struct cudbg_init
*pdbg_init
,
1384 struct cudbg_buffer
*dbg_buff
,
1385 struct cudbg_error
*cudbg_err
)
1387 struct adapter
*padap
= pdbg_init
->adap
;
1388 struct cudbg_buffer temp_buff
= { 0 };
1389 struct sge_qbase_reg_field
*sge_qbase
;
1390 struct ireg_buf
*ch_sge_dbg
;
1393 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1394 sizeof(*ch_sge_dbg
) * 2 + sizeof(*sge_qbase
),
1399 ch_sge_dbg
= (struct ireg_buf
*)temp_buff
.data
;
1400 for (i
= 0; i
< 2; i
++) {
1401 struct ireg_field
*sge_pio
= &ch_sge_dbg
->tp_pio
;
1402 u32
*buff
= ch_sge_dbg
->outbuf
;
1404 sge_pio
->ireg_addr
= t5_sge_dbg_index_array
[i
][0];
1405 sge_pio
->ireg_data
= t5_sge_dbg_index_array
[i
][1];
1406 sge_pio
->ireg_local_offset
= t5_sge_dbg_index_array
[i
][2];
1407 sge_pio
->ireg_offset_range
= t5_sge_dbg_index_array
[i
][3];
1408 t4_read_indirect(padap
,
1412 sge_pio
->ireg_offset_range
,
1413 sge_pio
->ireg_local_offset
);
1417 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) > CHELSIO_T5
) {
1418 sge_qbase
= (struct sge_qbase_reg_field
*)ch_sge_dbg
;
1419 /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
1420 * SGE_QBASE_MAP[0-3]
1422 sge_qbase
->reg_addr
= t6_sge_qbase_index_array
[0];
1423 for (i
= 0; i
< SGE_QBASE_DATA_REG_NUM
; i
++)
1424 sge_qbase
->reg_data
[i
] =
1425 t6_sge_qbase_index_array
[i
+ 1];
1427 for (i
= 0; i
<= PCIE_FW_MASTER_M
; i
++)
1428 cudbg_read_sge_qbase_indirect_reg(padap
, sge_qbase
,
1431 for (i
= 0; i
< padap
->params
.arch
.vfcount
; i
++)
1432 cudbg_read_sge_qbase_indirect_reg(padap
, sge_qbase
,
1435 sge_qbase
->vfcount
= padap
->params
.arch
.vfcount
;
1438 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1441 int cudbg_collect_ulprx_la(struct cudbg_init
*pdbg_init
,
1442 struct cudbg_buffer
*dbg_buff
,
1443 struct cudbg_error
*cudbg_err
)
1445 struct adapter
*padap
= pdbg_init
->adap
;
1446 struct cudbg_buffer temp_buff
= { 0 };
1447 struct cudbg_ulprx_la
*ulprx_la_buff
;
1450 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_ulprx_la
),
1455 ulprx_la_buff
= (struct cudbg_ulprx_la
*)temp_buff
.data
;
1456 t4_ulprx_read_la(padap
, (u32
*)ulprx_la_buff
->data
);
1457 ulprx_la_buff
->size
= ULPRX_LA_SIZE
;
1458 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1461 int cudbg_collect_tp_la(struct cudbg_init
*pdbg_init
,
1462 struct cudbg_buffer
*dbg_buff
,
1463 struct cudbg_error
*cudbg_err
)
1465 struct adapter
*padap
= pdbg_init
->adap
;
1466 struct cudbg_buffer temp_buff
= { 0 };
1467 struct cudbg_tp_la
*tp_la_buff
;
1470 size
= sizeof(struct cudbg_tp_la
) + TPLA_SIZE
* sizeof(u64
);
1471 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1475 tp_la_buff
= (struct cudbg_tp_la
*)temp_buff
.data
;
1476 tp_la_buff
->mode
= DBGLAMODE_G(t4_read_reg(padap
, TP_DBG_LA_CONFIG_A
));
1477 t4_tp_read_la(padap
, (u64
*)tp_la_buff
->data
, NULL
);
1478 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1481 int cudbg_collect_meminfo(struct cudbg_init
*pdbg_init
,
1482 struct cudbg_buffer
*dbg_buff
,
1483 struct cudbg_error
*cudbg_err
)
1485 struct adapter
*padap
= pdbg_init
->adap
;
1486 struct cudbg_buffer temp_buff
= { 0 };
1487 struct cudbg_meminfo
*meminfo_buff
;
1488 struct cudbg_ver_hdr
*ver_hdr
;
1491 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1492 sizeof(struct cudbg_ver_hdr
) +
1493 sizeof(struct cudbg_meminfo
),
1498 ver_hdr
= (struct cudbg_ver_hdr
*)temp_buff
.data
;
1499 ver_hdr
->signature
= CUDBG_ENTITY_SIGNATURE
;
1500 ver_hdr
->revision
= CUDBG_MEMINFO_REV
;
1501 ver_hdr
->size
= sizeof(struct cudbg_meminfo
);
1503 meminfo_buff
= (struct cudbg_meminfo
*)(temp_buff
.data
+
1505 rc
= cudbg_fill_meminfo(padap
, meminfo_buff
);
1507 cudbg_err
->sys_err
= rc
;
1508 cudbg_put_buff(pdbg_init
, &temp_buff
);
1512 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1515 int cudbg_collect_cim_pif_la(struct cudbg_init
*pdbg_init
,
1516 struct cudbg_buffer
*dbg_buff
,
1517 struct cudbg_error
*cudbg_err
)
1519 struct cudbg_cim_pif_la
*cim_pif_la_buff
;
1520 struct adapter
*padap
= pdbg_init
->adap
;
1521 struct cudbg_buffer temp_buff
= { 0 };
1524 size
= sizeof(struct cudbg_cim_pif_la
) +
1525 2 * CIM_PIFLA_SIZE
* 6 * sizeof(u32
);
1526 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1530 cim_pif_la_buff
= (struct cudbg_cim_pif_la
*)temp_buff
.data
;
1531 cim_pif_la_buff
->size
= CIM_PIFLA_SIZE
;
1532 t4_cim_read_pif_la(padap
, (u32
*)cim_pif_la_buff
->data
,
1533 (u32
*)cim_pif_la_buff
->data
+ 6 * CIM_PIFLA_SIZE
,
1535 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1538 int cudbg_collect_clk_info(struct cudbg_init
*pdbg_init
,
1539 struct cudbg_buffer
*dbg_buff
,
1540 struct cudbg_error
*cudbg_err
)
1542 struct adapter
*padap
= pdbg_init
->adap
;
1543 struct cudbg_buffer temp_buff
= { 0 };
1544 struct cudbg_clk_info
*clk_info_buff
;
1548 if (!padap
->params
.vpd
.cclk
)
1549 return CUDBG_STATUS_CCLK_NOT_DEFINED
;
1551 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_clk_info
),
1556 clk_info_buff
= (struct cudbg_clk_info
*)temp_buff
.data
;
1557 clk_info_buff
->cclk_ps
= 1000000000 / padap
->params
.vpd
.cclk
; /* psec */
1558 clk_info_buff
->res
= t4_read_reg(padap
, TP_TIMER_RESOLUTION_A
);
1559 clk_info_buff
->tre
= TIMERRESOLUTION_G(clk_info_buff
->res
);
1560 clk_info_buff
->dack_re
= DELAYEDACKRESOLUTION_G(clk_info_buff
->res
);
1561 tp_tick_us
= (clk_info_buff
->cclk_ps
<< clk_info_buff
->tre
) / 1000000;
1563 clk_info_buff
->dack_timer
=
1564 (clk_info_buff
->cclk_ps
<< clk_info_buff
->dack_re
) / 1000000 *
1565 t4_read_reg(padap
, TP_DACK_TIMER_A
);
1566 clk_info_buff
->retransmit_min
=
1567 tp_tick_us
* t4_read_reg(padap
, TP_RXT_MIN_A
);
1568 clk_info_buff
->retransmit_max
=
1569 tp_tick_us
* t4_read_reg(padap
, TP_RXT_MAX_A
);
1570 clk_info_buff
->persist_timer_min
=
1571 tp_tick_us
* t4_read_reg(padap
, TP_PERS_MIN_A
);
1572 clk_info_buff
->persist_timer_max
=
1573 tp_tick_us
* t4_read_reg(padap
, TP_PERS_MAX_A
);
1574 clk_info_buff
->keepalive_idle_timer
=
1575 tp_tick_us
* t4_read_reg(padap
, TP_KEEP_IDLE_A
);
1576 clk_info_buff
->keepalive_interval
=
1577 tp_tick_us
* t4_read_reg(padap
, TP_KEEP_INTVL_A
);
1578 clk_info_buff
->initial_srtt
=
1579 tp_tick_us
* INITSRTT_G(t4_read_reg(padap
, TP_INIT_SRTT_A
));
1580 clk_info_buff
->finwait2_timer
=
1581 tp_tick_us
* t4_read_reg(padap
, TP_FINWAIT2_TIMER_A
);
1583 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1586 int cudbg_collect_pcie_indirect(struct cudbg_init
*pdbg_init
,
1587 struct cudbg_buffer
*dbg_buff
,
1588 struct cudbg_error
*cudbg_err
)
1590 struct adapter
*padap
= pdbg_init
->adap
;
1591 struct cudbg_buffer temp_buff
= { 0 };
1592 struct ireg_buf
*ch_pcie
;
1596 n
= sizeof(t5_pcie_pdbg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1597 size
= sizeof(struct ireg_buf
) * n
* 2;
1598 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1602 ch_pcie
= (struct ireg_buf
*)temp_buff
.data
;
1604 for (i
= 0; i
< n
; i
++) {
1605 struct ireg_field
*pcie_pio
= &ch_pcie
->tp_pio
;
1606 u32
*buff
= ch_pcie
->outbuf
;
1608 pcie_pio
->ireg_addr
= t5_pcie_pdbg_array
[i
][0];
1609 pcie_pio
->ireg_data
= t5_pcie_pdbg_array
[i
][1];
1610 pcie_pio
->ireg_local_offset
= t5_pcie_pdbg_array
[i
][2];
1611 pcie_pio
->ireg_offset_range
= t5_pcie_pdbg_array
[i
][3];
1612 t4_read_indirect(padap
,
1613 pcie_pio
->ireg_addr
,
1614 pcie_pio
->ireg_data
,
1616 pcie_pio
->ireg_offset_range
,
1617 pcie_pio
->ireg_local_offset
);
1622 n
= sizeof(t5_pcie_cdbg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1623 for (i
= 0; i
< n
; i
++) {
1624 struct ireg_field
*pcie_pio
= &ch_pcie
->tp_pio
;
1625 u32
*buff
= ch_pcie
->outbuf
;
1627 pcie_pio
->ireg_addr
= t5_pcie_cdbg_array
[i
][0];
1628 pcie_pio
->ireg_data
= t5_pcie_cdbg_array
[i
][1];
1629 pcie_pio
->ireg_local_offset
= t5_pcie_cdbg_array
[i
][2];
1630 pcie_pio
->ireg_offset_range
= t5_pcie_cdbg_array
[i
][3];
1631 t4_read_indirect(padap
,
1632 pcie_pio
->ireg_addr
,
1633 pcie_pio
->ireg_data
,
1635 pcie_pio
->ireg_offset_range
,
1636 pcie_pio
->ireg_local_offset
);
1639 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1642 int cudbg_collect_pm_indirect(struct cudbg_init
*pdbg_init
,
1643 struct cudbg_buffer
*dbg_buff
,
1644 struct cudbg_error
*cudbg_err
)
1646 struct adapter
*padap
= pdbg_init
->adap
;
1647 struct cudbg_buffer temp_buff
= { 0 };
1648 struct ireg_buf
*ch_pm
;
1652 n
= sizeof(t5_pm_rx_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1653 size
= sizeof(struct ireg_buf
) * n
* 2;
1654 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1658 ch_pm
= (struct ireg_buf
*)temp_buff
.data
;
1660 for (i
= 0; i
< n
; i
++) {
1661 struct ireg_field
*pm_pio
= &ch_pm
->tp_pio
;
1662 u32
*buff
= ch_pm
->outbuf
;
1664 pm_pio
->ireg_addr
= t5_pm_rx_array
[i
][0];
1665 pm_pio
->ireg_data
= t5_pm_rx_array
[i
][1];
1666 pm_pio
->ireg_local_offset
= t5_pm_rx_array
[i
][2];
1667 pm_pio
->ireg_offset_range
= t5_pm_rx_array
[i
][3];
1668 t4_read_indirect(padap
,
1672 pm_pio
->ireg_offset_range
,
1673 pm_pio
->ireg_local_offset
);
1678 n
= sizeof(t5_pm_tx_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1679 for (i
= 0; i
< n
; i
++) {
1680 struct ireg_field
*pm_pio
= &ch_pm
->tp_pio
;
1681 u32
*buff
= ch_pm
->outbuf
;
1683 pm_pio
->ireg_addr
= t5_pm_tx_array
[i
][0];
1684 pm_pio
->ireg_data
= t5_pm_tx_array
[i
][1];
1685 pm_pio
->ireg_local_offset
= t5_pm_tx_array
[i
][2];
1686 pm_pio
->ireg_offset_range
= t5_pm_tx_array
[i
][3];
1687 t4_read_indirect(padap
,
1691 pm_pio
->ireg_offset_range
,
1692 pm_pio
->ireg_local_offset
);
1695 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1698 int cudbg_collect_tid(struct cudbg_init
*pdbg_init
,
1699 struct cudbg_buffer
*dbg_buff
,
1700 struct cudbg_error
*cudbg_err
)
1702 struct adapter
*padap
= pdbg_init
->adap
;
1703 struct cudbg_tid_info_region_rev1
*tid1
;
1704 struct cudbg_buffer temp_buff
= { 0 };
1705 struct cudbg_tid_info_region
*tid
;
1706 u32 para
[2], val
[2];
1709 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1710 sizeof(struct cudbg_tid_info_region_rev1
),
1715 tid1
= (struct cudbg_tid_info_region_rev1
*)temp_buff
.data
;
1717 tid1
->ver_hdr
.signature
= CUDBG_ENTITY_SIGNATURE
;
1718 tid1
->ver_hdr
.revision
= CUDBG_TID_INFO_REV
;
1719 tid1
->ver_hdr
.size
= sizeof(struct cudbg_tid_info_region_rev1
) -
1720 sizeof(struct cudbg_ver_hdr
);
1722 /* If firmware is not attached/alive, use backdoor register
1723 * access to collect dump.
1725 if (!is_fw_attached(pdbg_init
))
1728 #define FW_PARAM_PFVF_A(param) \
1729 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1730 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1731 FW_PARAMS_PARAM_Y_V(0) | \
1732 FW_PARAMS_PARAM_Z_V(0))
1734 para
[0] = FW_PARAM_PFVF_A(ETHOFLD_START
);
1735 para
[1] = FW_PARAM_PFVF_A(ETHOFLD_END
);
1736 rc
= t4_query_params(padap
, padap
->mbox
, padap
->pf
, 0, 2, para
, val
);
1738 cudbg_err
->sys_err
= rc
;
1739 cudbg_put_buff(pdbg_init
, &temp_buff
);
1742 tid
->uotid_base
= val
[0];
1743 tid
->nuotids
= val
[1] - val
[0] + 1;
1745 if (is_t5(padap
->params
.chip
)) {
1746 tid
->sb
= t4_read_reg(padap
, LE_DB_SERVER_INDEX_A
) / 4;
1747 } else if (is_t6(padap
->params
.chip
)) {
1749 t4_read_reg(padap
, LE_DB_ACTIVE_TABLE_START_INDEX_A
);
1750 tid
->sb
= t4_read_reg(padap
, LE_DB_SRVR_START_INDEX_A
);
1752 para
[0] = FW_PARAM_PFVF_A(HPFILTER_START
);
1753 para
[1] = FW_PARAM_PFVF_A(HPFILTER_END
);
1754 rc
= t4_query_params(padap
, padap
->mbox
, padap
->pf
, 0, 2,
1757 cudbg_err
->sys_err
= rc
;
1758 cudbg_put_buff(pdbg_init
, &temp_buff
);
1761 tid
->hpftid_base
= val
[0];
1762 tid
->nhpftids
= val
[1] - val
[0] + 1;
1765 #undef FW_PARAM_PFVF_A
1768 tid
->ntids
= padap
->tids
.ntids
;
1769 tid
->nstids
= padap
->tids
.nstids
;
1770 tid
->stid_base
= padap
->tids
.stid_base
;
1771 tid
->hash_base
= padap
->tids
.hash_base
;
1773 tid
->natids
= padap
->tids
.natids
;
1774 tid
->nftids
= padap
->tids
.nftids
;
1775 tid
->ftid_base
= padap
->tids
.ftid_base
;
1776 tid
->aftid_base
= padap
->tids
.aftid_base
;
1777 tid
->aftid_end
= padap
->tids
.aftid_end
;
1779 tid
->sftid_base
= padap
->tids
.sftid_base
;
1780 tid
->nsftids
= padap
->tids
.nsftids
;
1782 tid
->flags
= padap
->flags
;
1783 tid
->le_db_conf
= t4_read_reg(padap
, LE_DB_CONFIG_A
);
1784 tid
->ip_users
= t4_read_reg(padap
, LE_DB_ACT_CNT_IPV4_A
);
1785 tid
->ipv6_users
= t4_read_reg(padap
, LE_DB_ACT_CNT_IPV6_A
);
1787 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1790 int cudbg_collect_pcie_config(struct cudbg_init
*pdbg_init
,
1791 struct cudbg_buffer
*dbg_buff
,
1792 struct cudbg_error
*cudbg_err
)
1794 struct adapter
*padap
= pdbg_init
->adap
;
1795 struct cudbg_buffer temp_buff
= { 0 };
1796 u32 size
, *value
, j
;
1799 size
= sizeof(u32
) * CUDBG_NUM_PCIE_CONFIG_REGS
;
1800 n
= sizeof(t5_pcie_config_array
) / (2 * sizeof(u32
));
1801 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1805 value
= (u32
*)temp_buff
.data
;
1806 for (i
= 0; i
< n
; i
++) {
1807 for (j
= t5_pcie_config_array
[i
][0];
1808 j
<= t5_pcie_config_array
[i
][1]; j
+= 4) {
1809 t4_hw_pci_read_cfg4(padap
, j
, value
);
1813 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
/* Test whether a raw SGE context image in @buf is a valid entry of the
 * given context @type, by checking a type-specific valid bit.
 * Returns non-zero when the valid bit is set.
 *
 * NOTE(review): the lines that map @type to a bit position (and derive
 * 'bit' from 'bit_pos') are not visible in this extract — presumably a
 * switch on @type setting 'bit_pos'; confirm against the full source.
 */
1816 static int cudbg_sge_ctxt_check_valid(u32
*buf
, int type
)
1818 int index
, bit
, bit_pos
= 0;
/* Word index of the valid bit within the context image */
1831 index
= bit_pos
/ 32;
/* Non-zero iff the valid bit is set in the selected word */
1833 return buf
[index
] & (1U << bit
);
1836 static int cudbg_get_ctxt_region_info(struct adapter
*padap
,
1837 struct cudbg_region_info
*ctx_info
,
1840 struct cudbg_mem_desc mem_desc
;
1841 struct cudbg_meminfo meminfo
;
1842 u32 i
, j
, value
, found
;
1846 rc
= cudbg_fill_meminfo(padap
, &meminfo
);
1850 /* Get EGRESS and INGRESS context region size */
1851 for (i
= CTXT_EGRESS
; i
<= CTXT_INGRESS
; i
++) {
1853 memset(&mem_desc
, 0, sizeof(struct cudbg_mem_desc
));
1854 for (j
= 0; j
< ARRAY_SIZE(meminfo
.avail
); j
++) {
1855 rc
= cudbg_get_mem_region(padap
, &meminfo
, j
,
1860 rc
= cudbg_get_mem_relative(padap
, &meminfo
, j
,
1864 ctx_info
[i
].exist
= false;
1867 ctx_info
[i
].exist
= true;
1868 ctx_info
[i
].start
= mem_desc
.base
;
1869 ctx_info
[i
].end
= mem_desc
.limit
;
1875 ctx_info
[i
].exist
= false;
1878 /* Get FLM and CNM max qid. */
1879 value
= t4_read_reg(padap
, SGE_FLM_CFG_A
);
1881 /* Get number of data freelist queues */
1882 flq
= HDRSTARTFLQ_G(value
);
1883 ctx_info
[CTXT_FLM
].exist
= true;
1884 ctx_info
[CTXT_FLM
].end
= (CUDBG_MAX_FL_QIDS
>> flq
) * SGE_CTXT_SIZE
;
1886 /* The number of CONM contexts are same as number of freelist
1889 ctx_info
[CTXT_CNM
].exist
= true;
1890 ctx_info
[CTXT_CNM
].end
= ctx_info
[CTXT_FLM
].end
;
1895 int cudbg_dump_context_size(struct adapter
*padap
)
1897 struct cudbg_region_info region_info
[CTXT_CNM
+ 1] = { {0} };
1898 u8 mem_type
[CTXT_INGRESS
+ 1] = { 0 };
1902 /* Get max valid qid for each type of queue */
1903 rc
= cudbg_get_ctxt_region_info(padap
, region_info
, mem_type
);
1907 for (i
= 0; i
< CTXT_CNM
; i
++) {
1908 if (!region_info
[i
].exist
) {
1909 if (i
== CTXT_EGRESS
|| i
== CTXT_INGRESS
)
1910 size
+= CUDBG_LOWMEM_MAX_CTXT_QIDS
*
1915 size
+= (region_info
[i
].end
- region_info
[i
].start
+ 1) /
1918 return size
* sizeof(struct cudbg_ch_cntxt
);
1921 static void cudbg_read_sge_ctxt(struct cudbg_init
*pdbg_init
, u32 cid
,
1922 enum ctxt_type ctype
, u32
*data
)
1924 struct adapter
*padap
= pdbg_init
->adap
;
1927 /* Under heavy traffic, the SGE Queue contexts registers will be
1928 * frequently accessed by firmware.
1930 * To avoid conflicts with firmware, always ask firmware to fetch
1931 * the SGE Queue contexts via mailbox. On failure, fallback to
1932 * accessing hardware registers directly.
1934 if (is_fw_attached(pdbg_init
))
1935 rc
= t4_sge_ctxt_rd(padap
, padap
->mbox
, cid
, ctype
, data
);
1937 t4_sge_ctxt_rd_bd(padap
, cid
, ctype
, data
);
1940 static void cudbg_get_sge_ctxt_fw(struct cudbg_init
*pdbg_init
, u32 max_qid
,
1942 struct cudbg_ch_cntxt
**out_buff
)
1944 struct cudbg_ch_cntxt
*buff
= *out_buff
;
1948 for (j
= 0; j
< max_qid
; j
++) {
1949 cudbg_read_sge_ctxt(pdbg_init
, j
, ctxt_type
, buff
->data
);
1950 rc
= cudbg_sge_ctxt_check_valid(buff
->data
, ctxt_type
);
1954 buff
->cntxt_type
= ctxt_type
;
1957 if (ctxt_type
== CTXT_FLM
) {
1958 cudbg_read_sge_ctxt(pdbg_init
, j
, CTXT_CNM
, buff
->data
);
1959 buff
->cntxt_type
= CTXT_CNM
;
1968 int cudbg_collect_dump_context(struct cudbg_init
*pdbg_init
,
1969 struct cudbg_buffer
*dbg_buff
,
1970 struct cudbg_error
*cudbg_err
)
1972 struct cudbg_region_info region_info
[CTXT_CNM
+ 1] = { {0} };
1973 struct adapter
*padap
= pdbg_init
->adap
;
1974 u32 j
, size
, max_ctx_size
, max_ctx_qid
;
1975 u8 mem_type
[CTXT_INGRESS
+ 1] = { 0 };
1976 struct cudbg_buffer temp_buff
= { 0 };
1977 struct cudbg_ch_cntxt
*buff
;
1978 u64
*dst_off
, *src_off
;
1983 /* Get max valid qid for each type of queue */
1984 rc
= cudbg_get_ctxt_region_info(padap
, region_info
, mem_type
);
1988 rc
= cudbg_dump_context_size(padap
);
1990 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
1993 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1997 /* Get buffer with enough space to read the biggest context
2000 max_ctx_size
= max(region_info
[CTXT_EGRESS
].end
-
2001 region_info
[CTXT_EGRESS
].start
+ 1,
2002 region_info
[CTXT_INGRESS
].end
-
2003 region_info
[CTXT_INGRESS
].start
+ 1);
2005 ctx_buf
= kvzalloc(max_ctx_size
, GFP_KERNEL
);
2007 cudbg_put_buff(pdbg_init
, &temp_buff
);
2011 buff
= (struct cudbg_ch_cntxt
*)temp_buff
.data
;
2013 /* Collect EGRESS and INGRESS context data.
2014 * In case of failures, fallback to collecting via FW or
2017 for (i
= CTXT_EGRESS
; i
<= CTXT_INGRESS
; i
++) {
2018 if (!region_info
[i
].exist
) {
2019 max_ctx_qid
= CUDBG_LOWMEM_MAX_CTXT_QIDS
;
2020 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, i
,
2025 max_ctx_size
= region_info
[i
].end
- region_info
[i
].start
+ 1;
2026 max_ctx_qid
= max_ctx_size
/ SGE_CTXT_SIZE
;
2028 /* If firmware is not attached/alive, use backdoor register
2029 * access to collect dump.
2031 if (is_fw_attached(pdbg_init
)) {
2032 t4_sge_ctxt_flush(padap
, padap
->mbox
, i
);
2034 rc
= t4_memory_rw(padap
, MEMWIN_NIC
, mem_type
[i
],
2035 region_info
[i
].start
, max_ctx_size
,
2036 (__be32
*)ctx_buf
, 1);
2039 if (rc
|| !is_fw_attached(pdbg_init
)) {
2040 max_ctx_qid
= CUDBG_LOWMEM_MAX_CTXT_QIDS
;
2041 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, i
,
2046 for (j
= 0; j
< max_ctx_qid
; j
++) {
2047 src_off
= (u64
*)(ctx_buf
+ j
* SGE_CTXT_SIZE
);
2048 dst_off
= (u64
*)buff
->data
;
2050 /* The data is stored in 64-bit cpu order. Convert it
2051 * to big endian before parsing.
2053 for (k
= 0; k
< SGE_CTXT_SIZE
/ sizeof(u64
); k
++)
2054 dst_off
[k
] = cpu_to_be64(src_off
[k
]);
2056 rc
= cudbg_sge_ctxt_check_valid(buff
->data
, i
);
2060 buff
->cntxt_type
= i
;
2068 /* Collect FREELIST and CONGESTION MANAGER contexts */
2069 max_ctx_size
= region_info
[CTXT_FLM
].end
-
2070 region_info
[CTXT_FLM
].start
+ 1;
2071 max_ctx_qid
= max_ctx_size
/ SGE_CTXT_SIZE
;
2072 /* Since FLM and CONM are 1-to-1 mapped, the below function
2073 * will fetch both FLM and CONM contexts.
2075 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, CTXT_FLM
, &buff
);
2077 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2080 static inline void cudbg_tcamxy2valmask(u64 x
, u64 y
, u8
*addr
, u64
*mask
)
2083 y
= (__force u64
)cpu_to_be64(y
);
2084 memcpy(addr
, (char *)&y
+ 2, ETH_ALEN
);
2087 static void cudbg_mps_rpl_backdoor(struct adapter
*padap
,
2088 struct fw_ldst_mps_rplc
*mps_rplc
)
2090 if (is_t5(padap
->params
.chip
)) {
2091 mps_rplc
->rplc255_224
= htonl(t4_read_reg(padap
,
2092 MPS_VF_RPLCT_MAP3_A
));
2093 mps_rplc
->rplc223_192
= htonl(t4_read_reg(padap
,
2094 MPS_VF_RPLCT_MAP2_A
));
2095 mps_rplc
->rplc191_160
= htonl(t4_read_reg(padap
,
2096 MPS_VF_RPLCT_MAP1_A
));
2097 mps_rplc
->rplc159_128
= htonl(t4_read_reg(padap
,
2098 MPS_VF_RPLCT_MAP0_A
));
2100 mps_rplc
->rplc255_224
= htonl(t4_read_reg(padap
,
2101 MPS_VF_RPLCT_MAP7_A
));
2102 mps_rplc
->rplc223_192
= htonl(t4_read_reg(padap
,
2103 MPS_VF_RPLCT_MAP6_A
));
2104 mps_rplc
->rplc191_160
= htonl(t4_read_reg(padap
,
2105 MPS_VF_RPLCT_MAP5_A
));
2106 mps_rplc
->rplc159_128
= htonl(t4_read_reg(padap
,
2107 MPS_VF_RPLCT_MAP4_A
));
2109 mps_rplc
->rplc127_96
= htonl(t4_read_reg(padap
, MPS_VF_RPLCT_MAP3_A
));
2110 mps_rplc
->rplc95_64
= htonl(t4_read_reg(padap
, MPS_VF_RPLCT_MAP2_A
));
2111 mps_rplc
->rplc63_32
= htonl(t4_read_reg(padap
, MPS_VF_RPLCT_MAP1_A
));
2112 mps_rplc
->rplc31_0
= htonl(t4_read_reg(padap
, MPS_VF_RPLCT_MAP0_A
));
2115 static int cudbg_collect_tcam_index(struct cudbg_init
*pdbg_init
,
2116 struct cudbg_mps_tcam
*tcam
, u32 idx
)
2118 struct adapter
*padap
= pdbg_init
->adap
;
2119 u64 tcamy
, tcamx
, val
;
2123 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) >= CHELSIO_T6
) {
2124 /* CtlReqID - 1: use Host Driver Requester ID
2125 * CtlCmdType - 0: Read, 1: Write
2126 * CtlTcamSel - 0: TCAM0, 1: TCAM1
2127 * CtlXYBitSel- 0: Y bit, 1: X bit
2131 ctl
= CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
2133 ctl
|= CTLTCAMINDEX_V(idx
) | CTLTCAMSEL_V(0);
2135 ctl
|= CTLTCAMINDEX_V(idx
- 256) | CTLTCAMSEL_V(1);
2137 t4_write_reg(padap
, MPS_CLS_TCAM_DATA2_CTL_A
, ctl
);
2138 val
= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA1_REQ_ID1_A
);
2139 tcamy
= DMACH_G(val
) << 32;
2140 tcamy
|= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA0_REQ_ID1_A
);
2141 data2
= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA2_REQ_ID1_A
);
2142 tcam
->lookup_type
= DATALKPTYPE_G(data2
);
2144 /* 0 - Outer header, 1 - Inner header
2145 * [71:48] bit locations are overloaded for
2146 * outer vs. inner lookup types.
2148 if (tcam
->lookup_type
&& tcam
->lookup_type
!= DATALKPTYPE_M
) {
2149 /* Inner header VNI */
2150 tcam
->vniy
= (data2
& DATAVIDH2_F
) | DATAVIDH1_G(data2
);
2151 tcam
->vniy
= (tcam
->vniy
<< 16) | VIDL_G(val
);
2152 tcam
->dip_hit
= data2
& DATADIPHIT_F
;
2154 tcam
->vlan_vld
= data2
& DATAVIDH2_F
;
2155 tcam
->ivlan
= VIDL_G(val
);
2158 tcam
->port_num
= DATAPORTNUM_G(data2
);
2160 /* Read tcamx. Change the control param */
2161 ctl
|= CTLXYBITSEL_V(1);
2162 t4_write_reg(padap
, MPS_CLS_TCAM_DATA2_CTL_A
, ctl
);
2163 val
= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA1_REQ_ID1_A
);
2164 tcamx
= DMACH_G(val
) << 32;
2165 tcamx
|= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA0_REQ_ID1_A
);
2166 data2
= t4_read_reg(padap
, MPS_CLS_TCAM_RDATA2_REQ_ID1_A
);
2167 if (tcam
->lookup_type
&& tcam
->lookup_type
!= DATALKPTYPE_M
) {
2168 /* Inner header VNI mask */
2169 tcam
->vnix
= (data2
& DATAVIDH2_F
) | DATAVIDH1_G(data2
);
2170 tcam
->vnix
= (tcam
->vnix
<< 16) | VIDL_G(val
);
2173 tcamy
= t4_read_reg64(padap
, MPS_CLS_TCAM_Y_L(idx
));
2174 tcamx
= t4_read_reg64(padap
, MPS_CLS_TCAM_X_L(idx
));
2177 /* If no entry, return */
2181 tcam
->cls_lo
= t4_read_reg(padap
, MPS_CLS_SRAM_L(idx
));
2182 tcam
->cls_hi
= t4_read_reg(padap
, MPS_CLS_SRAM_H(idx
));
2184 if (is_t5(padap
->params
.chip
))
2185 tcam
->repli
= (tcam
->cls_lo
& REPLICATE_F
);
2186 else if (is_t6(padap
->params
.chip
))
2187 tcam
->repli
= (tcam
->cls_lo
& T6_REPLICATE_F
);
2190 struct fw_ldst_cmd ldst_cmd
;
2191 struct fw_ldst_mps_rplc mps_rplc
;
2193 memset(&ldst_cmd
, 0, sizeof(ldst_cmd
));
2194 ldst_cmd
.op_to_addrspace
=
2195 htonl(FW_CMD_OP_V(FW_LDST_CMD
) |
2196 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
2197 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS
));
2198 ldst_cmd
.cycles_to_len16
= htonl(FW_LEN16(ldst_cmd
));
2199 ldst_cmd
.u
.mps
.rplc
.fid_idx
=
2200 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC
) |
2201 FW_LDST_CMD_IDX_V(idx
));
2203 /* If firmware is not attached/alive, use backdoor register
2204 * access to collect dump.
2206 if (is_fw_attached(pdbg_init
))
2207 rc
= t4_wr_mbox(padap
, padap
->mbox
, &ldst_cmd
,
2208 sizeof(ldst_cmd
), &ldst_cmd
);
2210 if (rc
|| !is_fw_attached(pdbg_init
)) {
2211 cudbg_mps_rpl_backdoor(padap
, &mps_rplc
);
2212 /* Ignore error since we collected directly from
2213 * reading registers.
2217 mps_rplc
= ldst_cmd
.u
.mps
.rplc
;
2220 tcam
->rplc
[0] = ntohl(mps_rplc
.rplc31_0
);
2221 tcam
->rplc
[1] = ntohl(mps_rplc
.rplc63_32
);
2222 tcam
->rplc
[2] = ntohl(mps_rplc
.rplc95_64
);
2223 tcam
->rplc
[3] = ntohl(mps_rplc
.rplc127_96
);
2224 if (padap
->params
.arch
.mps_rplc_size
> CUDBG_MAX_RPLC_SIZE
) {
2225 tcam
->rplc
[4] = ntohl(mps_rplc
.rplc159_128
);
2226 tcam
->rplc
[5] = ntohl(mps_rplc
.rplc191_160
);
2227 tcam
->rplc
[6] = ntohl(mps_rplc
.rplc223_192
);
2228 tcam
->rplc
[7] = ntohl(mps_rplc
.rplc255_224
);
2231 cudbg_tcamxy2valmask(tcamx
, tcamy
, tcam
->addr
, &tcam
->mask
);
2233 tcam
->rplc_size
= padap
->params
.arch
.mps_rplc_size
;
2237 int cudbg_collect_mps_tcam(struct cudbg_init
*pdbg_init
,
2238 struct cudbg_buffer
*dbg_buff
,
2239 struct cudbg_error
*cudbg_err
)
2241 struct adapter
*padap
= pdbg_init
->adap
;
2242 struct cudbg_buffer temp_buff
= { 0 };
2243 u32 size
= 0, i
, n
, total_size
= 0;
2244 struct cudbg_mps_tcam
*tcam
;
2247 n
= padap
->params
.arch
.mps_tcam_size
;
2248 size
= sizeof(struct cudbg_mps_tcam
) * n
;
2249 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2253 tcam
= (struct cudbg_mps_tcam
*)temp_buff
.data
;
2254 for (i
= 0; i
< n
; i
++) {
2255 rc
= cudbg_collect_tcam_index(pdbg_init
, tcam
, i
);
2257 cudbg_err
->sys_err
= rc
;
2258 cudbg_put_buff(pdbg_init
, &temp_buff
);
2261 total_size
+= sizeof(struct cudbg_mps_tcam
);
2266 rc
= CUDBG_SYSTEM_ERROR
;
2267 cudbg_err
->sys_err
= rc
;
2268 cudbg_put_buff(pdbg_init
, &temp_buff
);
2271 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2274 int cudbg_collect_vpd_data(struct cudbg_init
*pdbg_init
,
2275 struct cudbg_buffer
*dbg_buff
,
2276 struct cudbg_error
*cudbg_err
)
2278 struct adapter
*padap
= pdbg_init
->adap
;
2279 struct cudbg_buffer temp_buff
= { 0 };
2280 char vpd_str
[CUDBG_VPD_VER_LEN
+ 1];
2281 u32 scfg_vers
, vpd_vers
, fw_vers
;
2282 struct cudbg_vpd_data
*vpd_data
;
2283 struct vpd_params vpd
= { 0 };
2286 rc
= t4_get_raw_vpd_params(padap
, &vpd
);
2290 rc
= t4_get_fw_version(padap
, &fw_vers
);
2294 /* Serial Configuration Version is located beyond the PF's vpd size.
2295 * Temporarily give access to entire EEPROM to get it.
2297 rc
= pci_set_vpd_size(padap
->pdev
, EEPROMVSIZE
);
2301 ret
= cudbg_read_vpd_reg(padap
, CUDBG_SCFG_VER_ADDR
, CUDBG_SCFG_VER_LEN
,
2304 /* Restore back to original PF's vpd size */
2305 rc
= pci_set_vpd_size(padap
->pdev
, CUDBG_VPD_PF_SIZE
);
2312 rc
= cudbg_read_vpd_reg(padap
, CUDBG_VPD_VER_ADDR
, CUDBG_VPD_VER_LEN
,
2317 vpd_str
[CUDBG_VPD_VER_LEN
] = '\0';
2318 rc
= kstrtouint(vpd_str
, 0, &vpd_vers
);
2322 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_vpd_data
),
2327 vpd_data
= (struct cudbg_vpd_data
*)temp_buff
.data
;
2328 memcpy(vpd_data
->sn
, vpd
.sn
, SERNUM_LEN
+ 1);
2329 memcpy(vpd_data
->bn
, vpd
.pn
, PN_LEN
+ 1);
2330 memcpy(vpd_data
->na
, vpd
.na
, MACADDR_LEN
+ 1);
2331 memcpy(vpd_data
->mn
, vpd
.id
, ID_LEN
+ 1);
2332 vpd_data
->scfg_vers
= scfg_vers
;
2333 vpd_data
->vpd_vers
= vpd_vers
;
2334 vpd_data
->fw_major
= FW_HDR_FW_VER_MAJOR_G(fw_vers
);
2335 vpd_data
->fw_minor
= FW_HDR_FW_VER_MINOR_G(fw_vers
);
2336 vpd_data
->fw_micro
= FW_HDR_FW_VER_MICRO_G(fw_vers
);
2337 vpd_data
->fw_build
= FW_HDR_FW_VER_BUILD_G(fw_vers
);
2338 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2341 static int cudbg_read_tid(struct cudbg_init
*pdbg_init
, u32 tid
,
2342 struct cudbg_tid_data
*tid_data
)
2344 struct adapter
*padap
= pdbg_init
->adap
;
2345 int i
, cmd_retry
= 8;
2348 /* Fill REQ_DATA regs with 0's */
2349 for (i
= 0; i
< NUM_LE_DB_DBGI_REQ_DATA_INSTANCES
; i
++)
2350 t4_write_reg(padap
, LE_DB_DBGI_REQ_DATA_A
+ (i
<< 2), 0);
2352 /* Write DBIG command */
2353 val
= DBGICMD_V(4) | DBGITID_V(tid
);
2354 t4_write_reg(padap
, LE_DB_DBGI_REQ_TCAM_CMD_A
, val
);
2355 tid_data
->dbig_cmd
= val
;
2357 val
= DBGICMDSTRT_F
| DBGICMDMODE_V(1); /* LE mode */
2358 t4_write_reg(padap
, LE_DB_DBGI_CONFIG_A
, val
);
2359 tid_data
->dbig_conf
= val
;
2361 /* Poll the DBGICMDBUSY bit */
2364 val
= t4_read_reg(padap
, LE_DB_DBGI_CONFIG_A
);
2365 val
= val
& DBGICMDBUSY_F
;
2368 return CUDBG_SYSTEM_ERROR
;
2371 /* Check RESP status */
2372 val
= t4_read_reg(padap
, LE_DB_DBGI_RSP_STATUS_A
);
2373 tid_data
->dbig_rsp_stat
= val
;
2375 return CUDBG_SYSTEM_ERROR
;
2377 /* Read RESP data */
2378 for (i
= 0; i
< NUM_LE_DB_DBGI_RSP_DATA_INSTANCES
; i
++)
2379 tid_data
->data
[i
] = t4_read_reg(padap
,
2380 LE_DB_DBGI_RSP_DATA_A
+
2382 tid_data
->tid
= tid
;
2386 static int cudbg_get_le_type(u32 tid
, struct cudbg_tcam tcam_region
)
2388 int type
= LE_ET_UNKNOWN
;
2390 if (tid
< tcam_region
.server_start
)
2391 type
= LE_ET_TCAM_CON
;
2392 else if (tid
< tcam_region
.filter_start
)
2393 type
= LE_ET_TCAM_SERVER
;
2394 else if (tid
< tcam_region
.clip_start
)
2395 type
= LE_ET_TCAM_FILTER
;
2396 else if (tid
< tcam_region
.routing_start
)
2397 type
= LE_ET_TCAM_CLIP
;
2398 else if (tid
< tcam_region
.tid_hash_base
)
2399 type
= LE_ET_TCAM_ROUTING
;
2400 else if (tid
< tcam_region
.max_tid
)
2401 type
= LE_ET_HASH_CON
;
2403 type
= LE_ET_INVALID_TID
;
2408 static int cudbg_is_ipv6_entry(struct cudbg_tid_data
*tid_data
,
2409 struct cudbg_tcam tcam_region
)
2414 le_type
= cudbg_get_le_type(tid_data
->tid
, tcam_region
);
2415 if (tid_data
->tid
& 1)
2418 if (le_type
== LE_ET_HASH_CON
) {
2419 ipv6
= tid_data
->data
[16] & 0x8000;
2420 } else if (le_type
== LE_ET_TCAM_CON
) {
2421 ipv6
= tid_data
->data
[16] & 0x8000;
2423 ipv6
= tid_data
->data
[9] == 0x00C00000;
2430 void cudbg_fill_le_tcam_info(struct adapter
*padap
,
2431 struct cudbg_tcam
*tcam_region
)
2435 /* Get the LE regions */
2436 value
= t4_read_reg(padap
, LE_DB_TID_HASHBASE_A
); /* hash base index */
2437 tcam_region
->tid_hash_base
= value
;
2439 /* Get routing table index */
2440 value
= t4_read_reg(padap
, LE_DB_ROUTING_TABLE_INDEX_A
);
2441 tcam_region
->routing_start
= value
;
2443 /* Get clip table index. For T6 there is separate CLIP TCAM */
2444 if (is_t6(padap
->params
.chip
))
2445 value
= t4_read_reg(padap
, LE_DB_CLCAM_TID_BASE_A
);
2447 value
= t4_read_reg(padap
, LE_DB_CLIP_TABLE_INDEX_A
);
2448 tcam_region
->clip_start
= value
;
2450 /* Get filter table index */
2451 value
= t4_read_reg(padap
, LE_DB_FILTER_TABLE_INDEX_A
);
2452 tcam_region
->filter_start
= value
;
2454 /* Get server table index */
2455 value
= t4_read_reg(padap
, LE_DB_SERVER_INDEX_A
);
2456 tcam_region
->server_start
= value
;
2458 /* Check whether hash is enabled and calculate the max tids */
2459 value
= t4_read_reg(padap
, LE_DB_CONFIG_A
);
2460 if ((value
>> HASHEN_S
) & 1) {
2461 value
= t4_read_reg(padap
, LE_DB_HASH_CONFIG_A
);
2462 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) > CHELSIO_T5
) {
2463 tcam_region
->max_tid
= (value
& 0xFFFFF) +
2464 tcam_region
->tid_hash_base
;
2466 value
= HASHTIDSIZE_G(value
);
2468 tcam_region
->max_tid
= value
+
2469 tcam_region
->tid_hash_base
;
2471 } else { /* hash not enabled */
2472 if (is_t6(padap
->params
.chip
))
2473 tcam_region
->max_tid
= (value
& ASLIPCOMPEN_F
) ?
2474 CUDBG_MAX_TID_COMP_EN
:
2475 CUDBG_MAX_TID_COMP_DIS
;
2477 tcam_region
->max_tid
= CUDBG_MAX_TCAM_TID
;
2480 if (is_t6(padap
->params
.chip
))
2481 tcam_region
->max_tid
+= CUDBG_T6_CLIP
;
2484 int cudbg_collect_le_tcam(struct cudbg_init
*pdbg_init
,
2485 struct cudbg_buffer
*dbg_buff
,
2486 struct cudbg_error
*cudbg_err
)
2488 struct adapter
*padap
= pdbg_init
->adap
;
2489 struct cudbg_buffer temp_buff
= { 0 };
2490 struct cudbg_tcam tcam_region
= { 0 };
2491 struct cudbg_tid_data
*tid_data
;
2496 cudbg_fill_le_tcam_info(padap
, &tcam_region
);
2498 size
= sizeof(struct cudbg_tid_data
) * tcam_region
.max_tid
;
2499 size
+= sizeof(struct cudbg_tcam
);
2500 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2504 memcpy(temp_buff
.data
, &tcam_region
, sizeof(struct cudbg_tcam
));
2505 bytes
= sizeof(struct cudbg_tcam
);
2506 tid_data
= (struct cudbg_tid_data
*)(temp_buff
.data
+ bytes
);
2508 for (i
= 0; i
< tcam_region
.max_tid
; ) {
2509 rc
= cudbg_read_tid(pdbg_init
, i
, tid_data
);
2511 cudbg_err
->sys_warn
= CUDBG_STATUS_PARTIAL_DATA
;
2512 /* Update tcam header and exit */
2513 tcam_region
.max_tid
= i
;
2514 memcpy(temp_buff
.data
, &tcam_region
,
2515 sizeof(struct cudbg_tcam
));
2519 if (cudbg_is_ipv6_entry(tid_data
, tcam_region
)) {
2520 /* T6 CLIP TCAM: ipv6 takes 4 entries */
2521 if (is_t6(padap
->params
.chip
) &&
2522 i
>= tcam_region
.clip_start
&&
2523 i
< tcam_region
.clip_start
+ CUDBG_T6_CLIP
)
2525 else /* Main TCAM: ipv6 takes two tids */
2532 bytes
+= sizeof(struct cudbg_tid_data
);
2536 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2539 int cudbg_collect_cctrl(struct cudbg_init
*pdbg_init
,
2540 struct cudbg_buffer
*dbg_buff
,
2541 struct cudbg_error
*cudbg_err
)
2543 struct adapter
*padap
= pdbg_init
->adap
;
2544 struct cudbg_buffer temp_buff
= { 0 };
2548 size
= sizeof(u16
) * NMTUS
* NCCTRL_WIN
;
2549 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2553 t4_read_cong_tbl(padap
, (void *)temp_buff
.data
);
2554 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2557 int cudbg_collect_ma_indirect(struct cudbg_init
*pdbg_init
,
2558 struct cudbg_buffer
*dbg_buff
,
2559 struct cudbg_error
*cudbg_err
)
2561 struct adapter
*padap
= pdbg_init
->adap
;
2562 struct cudbg_buffer temp_buff
= { 0 };
2563 struct ireg_buf
*ma_indr
;
2567 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) < CHELSIO_T6
)
2568 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
2570 n
= sizeof(t6_ma_ireg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
2571 size
= sizeof(struct ireg_buf
) * n
* 2;
2572 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2576 ma_indr
= (struct ireg_buf
*)temp_buff
.data
;
2577 for (i
= 0; i
< n
; i
++) {
2578 struct ireg_field
*ma_fli
= &ma_indr
->tp_pio
;
2579 u32
*buff
= ma_indr
->outbuf
;
2581 ma_fli
->ireg_addr
= t6_ma_ireg_array
[i
][0];
2582 ma_fli
->ireg_data
= t6_ma_ireg_array
[i
][1];
2583 ma_fli
->ireg_local_offset
= t6_ma_ireg_array
[i
][2];
2584 ma_fli
->ireg_offset_range
= t6_ma_ireg_array
[i
][3];
2585 t4_read_indirect(padap
, ma_fli
->ireg_addr
, ma_fli
->ireg_data
,
2586 buff
, ma_fli
->ireg_offset_range
,
2587 ma_fli
->ireg_local_offset
);
2591 n
= sizeof(t6_ma_ireg_array2
) / (IREG_NUM_ELEM
* sizeof(u32
));
2592 for (i
= 0; i
< n
; i
++) {
2593 struct ireg_field
*ma_fli
= &ma_indr
->tp_pio
;
2594 u32
*buff
= ma_indr
->outbuf
;
2596 ma_fli
->ireg_addr
= t6_ma_ireg_array2
[i
][0];
2597 ma_fli
->ireg_data
= t6_ma_ireg_array2
[i
][1];
2598 ma_fli
->ireg_local_offset
= t6_ma_ireg_array2
[i
][2];
2599 for (j
= 0; j
< t6_ma_ireg_array2
[i
][3]; j
++) {
2600 t4_read_indirect(padap
, ma_fli
->ireg_addr
,
2601 ma_fli
->ireg_data
, buff
, 1,
2602 ma_fli
->ireg_local_offset
);
2604 ma_fli
->ireg_local_offset
+= 0x20;
2608 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2611 int cudbg_collect_ulptx_la(struct cudbg_init
*pdbg_init
,
2612 struct cudbg_buffer
*dbg_buff
,
2613 struct cudbg_error
*cudbg_err
)
2615 struct adapter
*padap
= pdbg_init
->adap
;
2616 struct cudbg_buffer temp_buff
= { 0 };
2617 struct cudbg_ulptx_la
*ulptx_la_buff
;
2618 struct cudbg_ver_hdr
*ver_hdr
;
2622 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
2623 sizeof(struct cudbg_ver_hdr
) +
2624 sizeof(struct cudbg_ulptx_la
),
2629 ver_hdr
= (struct cudbg_ver_hdr
*)temp_buff
.data
;
2630 ver_hdr
->signature
= CUDBG_ENTITY_SIGNATURE
;
2631 ver_hdr
->revision
= CUDBG_ULPTX_LA_REV
;
2632 ver_hdr
->size
= sizeof(struct cudbg_ulptx_la
);
2634 ulptx_la_buff
= (struct cudbg_ulptx_la
*)(temp_buff
.data
+
2636 for (i
= 0; i
< CUDBG_NUM_ULPTX
; i
++) {
2637 ulptx_la_buff
->rdptr
[i
] = t4_read_reg(padap
,
2638 ULP_TX_LA_RDPTR_0_A
+
2640 ulptx_la_buff
->wrptr
[i
] = t4_read_reg(padap
,
2641 ULP_TX_LA_WRPTR_0_A
+
2643 ulptx_la_buff
->rddata
[i
] = t4_read_reg(padap
,
2644 ULP_TX_LA_RDDATA_0_A
+
2646 for (j
= 0; j
< CUDBG_NUM_ULPTX_READ
; j
++)
2647 ulptx_la_buff
->rd_data
[i
][j
] =
2649 ULP_TX_LA_RDDATA_0_A
+ 0x10 * i
);
2652 for (i
= 0; i
< CUDBG_NUM_ULPTX_ASIC_READ
; i
++) {
2653 t4_write_reg(padap
, ULP_TX_ASIC_DEBUG_CTRL_A
, 0x1);
2654 ulptx_la_buff
->rdptr_asic
[i
] =
2655 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_CTRL_A
);
2656 ulptx_la_buff
->rddata_asic
[i
][0] =
2657 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_0_A
);
2658 ulptx_la_buff
->rddata_asic
[i
][1] =
2659 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_1_A
);
2660 ulptx_la_buff
->rddata_asic
[i
][2] =
2661 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_2_A
);
2662 ulptx_la_buff
->rddata_asic
[i
][3] =
2663 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_3_A
);
2664 ulptx_la_buff
->rddata_asic
[i
][4] =
2665 t4_read_reg(padap
, ULP_TX_ASIC_DEBUG_4_A
);
2666 ulptx_la_buff
->rddata_asic
[i
][5] =
2667 t4_read_reg(padap
, PM_RX_BASE_ADDR
);
2670 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2673 int cudbg_collect_up_cim_indirect(struct cudbg_init
*pdbg_init
,
2674 struct cudbg_buffer
*dbg_buff
,
2675 struct cudbg_error
*cudbg_err
)
2677 struct adapter
*padap
= pdbg_init
->adap
;
2678 struct cudbg_buffer temp_buff
= { 0 };
2679 u32 local_offset
, local_range
;
2680 struct ireg_buf
*up_cim
;
2685 if (is_t5(padap
->params
.chip
))
2686 n
= sizeof(t5_up_cim_reg_array
) /
2687 ((IREG_NUM_ELEM
+ 1) * sizeof(u32
));
2688 else if (is_t6(padap
->params
.chip
))
2689 n
= sizeof(t6_up_cim_reg_array
) /
2690 ((IREG_NUM_ELEM
+ 1) * sizeof(u32
));
2692 return CUDBG_STATUS_NOT_IMPLEMENTED
;
2694 size
= sizeof(struct ireg_buf
) * n
;
2695 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2699 up_cim
= (struct ireg_buf
*)temp_buff
.data
;
2700 for (i
= 0; i
< n
; i
++) {
2701 struct ireg_field
*up_cim_reg
= &up_cim
->tp_pio
;
2702 u32
*buff
= up_cim
->outbuf
;
2704 if (is_t5(padap
->params
.chip
)) {
2705 up_cim_reg
->ireg_addr
= t5_up_cim_reg_array
[i
][0];
2706 up_cim_reg
->ireg_data
= t5_up_cim_reg_array
[i
][1];
2707 up_cim_reg
->ireg_local_offset
=
2708 t5_up_cim_reg_array
[i
][2];
2709 up_cim_reg
->ireg_offset_range
=
2710 t5_up_cim_reg_array
[i
][3];
2711 instance
= t5_up_cim_reg_array
[i
][4];
2712 } else if (is_t6(padap
->params
.chip
)) {
2713 up_cim_reg
->ireg_addr
= t6_up_cim_reg_array
[i
][0];
2714 up_cim_reg
->ireg_data
= t6_up_cim_reg_array
[i
][1];
2715 up_cim_reg
->ireg_local_offset
=
2716 t6_up_cim_reg_array
[i
][2];
2717 up_cim_reg
->ireg_offset_range
=
2718 t6_up_cim_reg_array
[i
][3];
2719 instance
= t6_up_cim_reg_array
[i
][4];
2723 case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES
:
2724 iter
= up_cim_reg
->ireg_offset_range
;
2725 local_offset
= 0x120;
2728 case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES
:
2729 iter
= up_cim_reg
->ireg_offset_range
;
2730 local_offset
= 0x10;
2736 local_range
= up_cim_reg
->ireg_offset_range
;
2740 for (j
= 0; j
< iter
; j
++, buff
++) {
2741 rc
= t4_cim_read(padap
,
2742 up_cim_reg
->ireg_local_offset
+
2743 (j
* local_offset
), local_range
, buff
);
2745 cudbg_put_buff(pdbg_init
, &temp_buff
);
2751 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2754 int cudbg_collect_pbt_tables(struct cudbg_init
*pdbg_init
,
2755 struct cudbg_buffer
*dbg_buff
,
2756 struct cudbg_error
*cudbg_err
)
2758 struct adapter
*padap
= pdbg_init
->adap
;
2759 struct cudbg_buffer temp_buff
= { 0 };
2760 struct cudbg_pbt_tables
*pbt
;
2764 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
2765 sizeof(struct cudbg_pbt_tables
),
2770 pbt
= (struct cudbg_pbt_tables
*)temp_buff
.data
;
2771 /* PBT dynamic entries */
2772 addr
= CUDBG_CHAC_PBT_ADDR
;
2773 for (i
= 0; i
< CUDBG_PBT_DYNAMIC_ENTRIES
; i
++) {
2774 rc
= t4_cim_read(padap
, addr
+ (i
* 4), 1,
2775 &pbt
->pbt_dynamic
[i
]);
2777 cudbg_err
->sys_err
= rc
;
2778 cudbg_put_buff(pdbg_init
, &temp_buff
);
2783 /* PBT static entries */
2784 /* static entries start when bit 6 is set */
2785 addr
= CUDBG_CHAC_PBT_ADDR
+ (1 << 6);
2786 for (i
= 0; i
< CUDBG_PBT_STATIC_ENTRIES
; i
++) {
2787 rc
= t4_cim_read(padap
, addr
+ (i
* 4), 1,
2788 &pbt
->pbt_static
[i
]);
2790 cudbg_err
->sys_err
= rc
;
2791 cudbg_put_buff(pdbg_init
, &temp_buff
);
2797 addr
= CUDBG_CHAC_PBT_LRF
;
2798 for (i
= 0; i
< CUDBG_LRF_ENTRIES
; i
++) {
2799 rc
= t4_cim_read(padap
, addr
+ (i
* 4), 1,
2800 &pbt
->lrf_table
[i
]);
2802 cudbg_err
->sys_err
= rc
;
2803 cudbg_put_buff(pdbg_init
, &temp_buff
);
2808 /* PBT data entries */
2809 addr
= CUDBG_CHAC_PBT_DATA
;
2810 for (i
= 0; i
< CUDBG_PBT_DATA_ENTRIES
; i
++) {
2811 rc
= t4_cim_read(padap
, addr
+ (i
* 4), 1,
2814 cudbg_err
->sys_err
= rc
;
2815 cudbg_put_buff(pdbg_init
, &temp_buff
);
2819 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2822 int cudbg_collect_mbox_log(struct cudbg_init
*pdbg_init
,
2823 struct cudbg_buffer
*dbg_buff
,
2824 struct cudbg_error
*cudbg_err
)
2826 struct adapter
*padap
= pdbg_init
->adap
;
2827 struct cudbg_mbox_log
*mboxlog
= NULL
;
2828 struct cudbg_buffer temp_buff
= { 0 };
2829 struct mbox_cmd_log
*log
= NULL
;
2830 struct mbox_cmd
*entry
;
2831 unsigned int entry_idx
;
2837 log
= padap
->mbox_log
;
2838 mbox_cmds
= padap
->mbox_log
->size
;
2839 size
= sizeof(struct cudbg_mbox_log
) * mbox_cmds
;
2840 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2844 mboxlog
= (struct cudbg_mbox_log
*)temp_buff
.data
;
2845 for (k
= 0; k
< mbox_cmds
; k
++) {
2846 entry_idx
= log
->cursor
+ k
;
2847 if (entry_idx
>= log
->size
)
2848 entry_idx
-= log
->size
;
2850 entry
= mbox_cmd_log_entry(log
, entry_idx
);
2851 /* skip over unused entries */
2852 if (entry
->timestamp
== 0)
2855 memcpy(&mboxlog
->entry
, entry
, sizeof(struct mbox_cmd
));
2856 for (i
= 0; i
< MBOX_LEN
/ 8; i
++) {
2857 flit
= entry
->cmd
[i
];
2858 mboxlog
->hi
[i
] = (u32
)(flit
>> 32);
2859 mboxlog
->lo
[i
] = (u32
)flit
;
2863 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2866 int cudbg_collect_hma_indirect(struct cudbg_init
*pdbg_init
,
2867 struct cudbg_buffer
*dbg_buff
,
2868 struct cudbg_error
*cudbg_err
)
2870 struct adapter
*padap
= pdbg_init
->adap
;
2871 struct cudbg_buffer temp_buff
= { 0 };
2872 struct ireg_buf
*hma_indr
;
2876 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) < CHELSIO_T6
)
2877 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
2879 n
= sizeof(t6_hma_ireg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
2880 size
= sizeof(struct ireg_buf
) * n
;
2881 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
2885 hma_indr
= (struct ireg_buf
*)temp_buff
.data
;
2886 for (i
= 0; i
< n
; i
++) {
2887 struct ireg_field
*hma_fli
= &hma_indr
->tp_pio
;
2888 u32
*buff
= hma_indr
->outbuf
;
2890 hma_fli
->ireg_addr
= t6_hma_ireg_array
[i
][0];
2891 hma_fli
->ireg_data
= t6_hma_ireg_array
[i
][1];
2892 hma_fli
->ireg_local_offset
= t6_hma_ireg_array
[i
][2];
2893 hma_fli
->ireg_offset_range
= t6_hma_ireg_array
[i
][3];
2894 t4_read_indirect(padap
, hma_fli
->ireg_addr
, hma_fli
->ireg_data
,
2895 buff
, hma_fli
->ireg_offset_range
,
2896 hma_fli
->ireg_local_offset
);
2899 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2902 void cudbg_fill_qdesc_num_and_size(const struct adapter
*padap
,
2903 u32
*num
, u32
*size
)
2905 u32 tot_entries
= 0, tot_size
= 0;
2907 /* NIC TXQ, RXQ, FLQ, and CTRLQ */
2908 tot_entries
+= MAX_ETH_QSETS
* 3;
2909 tot_entries
+= MAX_CTRL_QUEUES
;
2911 tot_size
+= MAX_ETH_QSETS
* MAX_TXQ_ENTRIES
* MAX_TXQ_DESC_SIZE
;
2912 tot_size
+= MAX_ETH_QSETS
* MAX_RSPQ_ENTRIES
* MAX_RXQ_DESC_SIZE
;
2913 tot_size
+= MAX_ETH_QSETS
* MAX_RX_BUFFERS
* MAX_FL_DESC_SIZE
;
2914 tot_size
+= MAX_CTRL_QUEUES
* MAX_CTRL_TXQ_ENTRIES
*
2915 MAX_CTRL_TXQ_DESC_SIZE
;
2917 /* FW_EVTQ and INTRQ */
2918 tot_entries
+= INGQ_EXTRAS
;
2919 tot_size
+= INGQ_EXTRAS
* MAX_RSPQ_ENTRIES
* MAX_RXQ_DESC_SIZE
;
2923 tot_size
+= MAX_TXQ_ENTRIES
* MAX_TXQ_DESC_SIZE
;
2925 /* ULD TXQ, RXQ, and FLQ */
2926 tot_entries
+= CXGB4_TX_MAX
* MAX_OFLD_QSETS
;
2927 tot_entries
+= CXGB4_ULD_MAX
* MAX_ULD_QSETS
* 2;
2929 tot_size
+= CXGB4_TX_MAX
* MAX_OFLD_QSETS
* MAX_TXQ_ENTRIES
*
2931 tot_size
+= CXGB4_ULD_MAX
* MAX_ULD_QSETS
* MAX_RSPQ_ENTRIES
*
2933 tot_size
+= CXGB4_ULD_MAX
* MAX_ULD_QSETS
* MAX_RX_BUFFERS
*
2937 tot_entries
+= CXGB4_ULD_MAX
* MAX_ULD_QSETS
;
2938 tot_size
+= CXGB4_ULD_MAX
* MAX_ULD_QSETS
* SGE_MAX_IQ_SIZE
*
2941 /* ETHOFLD TXQ, RXQ, and FLQ */
2942 tot_entries
+= MAX_OFLD_QSETS
* 3;
2943 tot_size
+= MAX_OFLD_QSETS
* MAX_TXQ_ENTRIES
* MAX_TXQ_DESC_SIZE
;
2945 tot_size
+= sizeof(struct cudbg_ver_hdr
) +
2946 sizeof(struct cudbg_qdesc_info
) +
2947 sizeof(struct cudbg_qdesc_entry
) * tot_entries
;
2956 int cudbg_collect_qdesc(struct cudbg_init
*pdbg_init
,
2957 struct cudbg_buffer
*dbg_buff
,
2958 struct cudbg_error
*cudbg_err
)
2960 u32 num_queues
= 0, tot_entries
= 0, size
= 0;
2961 struct adapter
*padap
= pdbg_init
->adap
;
2962 struct cudbg_buffer temp_buff
= { 0 };
2963 struct cudbg_qdesc_entry
*qdesc_entry
;
2964 struct cudbg_qdesc_info
*qdesc_info
;
2965 struct cudbg_ver_hdr
*ver_hdr
;
2966 struct sge
*s
= &padap
->sge
;
2967 u32 i
, j
, cur_off
, tot_len
;
2971 cudbg_fill_qdesc_num_and_size(padap
, &tot_entries
, &size
);
2972 size
= min_t(u32
, size
, CUDBG_DUMP_BUFF_SIZE
);
2974 data
= kvzalloc(size
, GFP_KERNEL
);
2978 ver_hdr
= (struct cudbg_ver_hdr
*)data
;
2979 ver_hdr
->signature
= CUDBG_ENTITY_SIGNATURE
;
2980 ver_hdr
->revision
= CUDBG_QDESC_REV
;
2981 ver_hdr
->size
= sizeof(struct cudbg_qdesc_info
);
2982 size
-= sizeof(*ver_hdr
);
2984 qdesc_info
= (struct cudbg_qdesc_info
*)(data
+
2986 size
-= sizeof(*qdesc_info
);
2987 qdesc_entry
= (struct cudbg_qdesc_entry
*)qdesc_info
->data
;
2989 #define QDESC_GET(q, desc, type, label) do { \
2994 cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
2995 size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
2997 qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
3001 #define QDESC_GET_TXQ(q, type, label) do { \
3002 struct sge_txq *txq = (struct sge_txq *)q; \
3003 QDESC_GET(txq, txq->desc, type, label); \
3006 #define QDESC_GET_RXQ(q, type, label) do { \
3007 struct sge_rspq *rxq = (struct sge_rspq *)q; \
3008 QDESC_GET(rxq, rxq->desc, type, label); \
3011 #define QDESC_GET_FLQ(q, type, label) do { \
3012 struct sge_fl *flq = (struct sge_fl *)q; \
3013 QDESC_GET(flq, flq->desc, type, label); \
3017 for (i
= 0; i
< s
->ethqsets
; i
++)
3018 QDESC_GET_TXQ(&s
->ethtxq
[i
].q
, CUDBG_QTYPE_NIC_TXQ
, out
);
3021 for (i
= 0; i
< s
->ethqsets
; i
++)
3022 QDESC_GET_RXQ(&s
->ethrxq
[i
].rspq
, CUDBG_QTYPE_NIC_RXQ
, out
);
3025 for (i
= 0; i
< s
->ethqsets
; i
++)
3026 QDESC_GET_FLQ(&s
->ethrxq
[i
].fl
, CUDBG_QTYPE_NIC_FLQ
, out
);
3029 for (i
= 0; i
< padap
->params
.nports
; i
++)
3030 QDESC_GET_TXQ(&s
->ctrlq
[i
].q
, CUDBG_QTYPE_CTRLQ
, out
);
3033 QDESC_GET_RXQ(&s
->fw_evtq
, CUDBG_QTYPE_FWEVTQ
, out
);
3036 QDESC_GET_RXQ(&s
->intrq
, CUDBG_QTYPE_INTRQ
, out
);
3039 QDESC_GET_TXQ(&s
->ptptxq
.q
, CUDBG_QTYPE_PTP_TXQ
, out
);
3042 mutex_lock(&uld_mutex
);
3044 if (s
->uld_txq_info
) {
3045 struct sge_uld_txq_info
*utxq
;
3048 for (j
= 0; j
< CXGB4_TX_MAX
; j
++) {
3049 if (!s
->uld_txq_info
[j
])
3052 utxq
= s
->uld_txq_info
[j
];
3053 for (i
= 0; i
< utxq
->ntxq
; i
++)
3054 QDESC_GET_TXQ(&utxq
->uldtxq
[i
].q
,
3055 cudbg_uld_txq_to_qtype(j
),
3060 if (s
->uld_rxq_info
) {
3061 struct sge_uld_rxq_info
*urxq
;
3065 for (j
= 0; j
< CXGB4_ULD_MAX
; j
++) {
3066 if (!s
->uld_rxq_info
[j
])
3069 urxq
= s
->uld_rxq_info
[j
];
3070 for (i
= 0; i
< urxq
->nrxq
; i
++)
3071 QDESC_GET_RXQ(&urxq
->uldrxq
[i
].rspq
,
3072 cudbg_uld_rxq_to_qtype(j
),
3077 for (j
= 0; j
< CXGB4_ULD_MAX
; j
++) {
3078 if (!s
->uld_rxq_info
[j
])
3081 urxq
= s
->uld_rxq_info
[j
];
3082 for (i
= 0; i
< urxq
->nrxq
; i
++)
3083 QDESC_GET_FLQ(&urxq
->uldrxq
[i
].fl
,
3084 cudbg_uld_flq_to_qtype(j
),
3089 for (j
= 0; j
< CXGB4_ULD_MAX
; j
++) {
3090 if (!s
->uld_rxq_info
[j
])
3093 urxq
= s
->uld_rxq_info
[j
];
3095 for (i
= 0; i
< urxq
->nciq
; i
++)
3096 QDESC_GET_RXQ(&urxq
->uldrxq
[base
+ i
].rspq
,
3097 cudbg_uld_ciq_to_qtype(j
),
3104 for (i
= 0; i
< s
->eoqsets
; i
++)
3105 QDESC_GET_TXQ(&s
->eohw_txq
[i
].q
,
3106 CUDBG_QTYPE_ETHOFLD_TXQ
, out
);
3108 /* ETHOFLD RXQ and FLQ */
3110 for (i
= 0; i
< s
->eoqsets
; i
++)
3111 QDESC_GET_RXQ(&s
->eohw_rxq
[i
].rspq
,
3112 CUDBG_QTYPE_ETHOFLD_RXQ
, out
);
3114 for (i
= 0; i
< s
->eoqsets
; i
++)
3115 QDESC_GET_FLQ(&s
->eohw_rxq
[i
].fl
,
3116 CUDBG_QTYPE_ETHOFLD_FLQ
, out
);
3120 mutex_unlock(&uld_mutex
);
3123 qdesc_info
->qdesc_entry_size
= sizeof(*qdesc_entry
);
3124 qdesc_info
->num_queues
= num_queues
;
3127 u32 chunk_size
= min_t(u32
, tot_len
, CUDBG_CHUNK_SIZE
);
3129 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, chunk_size
,
3132 cudbg_err
->sys_warn
= CUDBG_STATUS_PARTIAL_DATA
;
3136 memcpy(temp_buff
.data
, data
+ cur_off
, chunk_size
);
3137 tot_len
-= chunk_size
;
3138 cur_off
+= chunk_size
;
3139 rc
= cudbg_write_and_release_buff(pdbg_init
, &temp_buff
,
3142 cudbg_put_buff(pdbg_init
, &temp_buff
);
3143 cudbg_err
->sys_warn
= CUDBG_STATUS_PARTIAL_DATA
;
3152 #undef QDESC_GET_FLQ
3153 #undef QDESC_GET_RXQ
3154 #undef QDESC_GET_TXQ