// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mcs.h"
#include "mcs_reg.h"

#define DRV_NAME	"Marvell MCS Driver"

#define PCI_CFG_REG_BAR_NUM	0

static const struct pci_device_id mcs_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
	{ 0, }	/* end of table */
};

static LIST_HEAD(mcs_list);

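/* CSE statistics helpers
 *
 * Each counter below lives in its own 64-bit CSE CSR; the helpers compute
 * the per-index register offset and latch the value with mcs_reg_read().
 * Callers that need a coherent snapshot are expected to serialize on
 * mcs->stats_lock (initialized in mcs_probe()), e.g. (usage sketch only):
 *
 *	struct mcs_secy_stats st;
 *
 *	mutex_lock(&mcs->stats_lock);
 *	mcs_get_tx_secy_stats(mcs, &st, secy_id);
 *	mutex_unlock(&mcs->stats_lock);
 */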
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
	stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
	stats->octet_protected_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
	stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
	stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
	stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
}

void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
	stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
	stats->octet_validated_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
	stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
	stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
	stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
	stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
	stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
		stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
			  int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
	else
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);

	stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
}

void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
			int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
			stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
		stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
		stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
		stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
		stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
		      int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
		stats->hit_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
		stats->pkt_late_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
			stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
			stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
		}
		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
			stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
			stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
			stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
			stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
		}
	}
}

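/* Clear a counter group by writing bit 0 of the CSE RX/TX control CSR
 * (treated here as a clear-on-read enable), reading the counters of the
 * requested type back (the read values are deliberately discarded), and
 * then restoring normal read behaviour.
 */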
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
{
	struct mcs_flowid_stats flowid_st;
	struct mcs_port_stats port_st;
	struct mcs_secy_stats secy_st;
	struct mcs_sc_stats sc_st;
	struct mcs_sa_stats sa_st;
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_SLAVE_CTRL;
	else
		reg = MCSX_CSE_TX_SLAVE_CTRL;

	mcs_reg_write(mcs, reg, BIT_ULL(0));

	switch (type) {
	case MCS_FLOWID_STATS:
		mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
		break;
	case MCS_SECY_STATS:
		if (dir == MCS_RX)
			mcs_get_rx_secy_stats(mcs, &secy_st, id);
		else
			mcs_get_tx_secy_stats(mcs, &secy_st, id);
		break;
	case MCS_SC_STATS:
		mcs_get_sc_stats(mcs, &sc_st, id, dir);
		break;
	case MCS_SA_STATS:
		mcs_get_sa_stats(mcs, &sa_st, id, dir);
		break;
	case MCS_PORT_STATS:
		mcs_get_port_stats(mcs, &port_st, id, dir);
		break;
	}

	mcs_reg_write(mcs, reg, 0x0);
}

int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear FLOWID stats */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
	}

	/* Clear SECY stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
	}

	/* Clear SC stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
	}

	/* Clear SA stats */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
	}
	return 0;
}

void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	mcs_reg_write(mcs, reg, next_pn);
}

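/* SA_MAP_MEM_0 packing on CN10KB, as encoded below: sa_index0 in bits
 * [7:0], sa_index1 in [16:9], rekey_ena in bit 18, sa_index0_vld in bit 19,
 * sa_index1_vld in bit 20, tx_sa_active in bit 21 and the low SCI bits from
 * bit 22 upwards; the remaining SCI bits (sectag_sci >> 42) spill into
 * SA_MAP_MEM_1.
 */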
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index0 & 0xFF) |
	      (map->sa_index1 & 0xFF) << 9 |
	      (map->rekey_ena & 0x1) << 18 |
	      (map->sa_index0_vld & 0x1) << 19 |
	      (map->sa_index1_vld & 0x1) << 20 |
	      (map->tx_sa_active & 0x1) << 21 |
	      map->sectag_sci << 22;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	val = map->sectag_sci >> 42;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
	mcs_reg_write(mcs, reg, val);
}

void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

	val = (map->sa_index & 0xFF) | map->sa_in_use << 9;

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}

void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 8; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 9; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	}
}

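/* SC CAM enable bits are one per entry, split across two 64-bit CSRs:
 * entries 0-63 in SC_CAM_ENA(0) and higher entries in SC_CAM_ENA(1).
 */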
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
{
	u64 reg, val;

	reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
	if (sc_id > 63)
		reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);

	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
{
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
	/* Enable SC CAM */
	mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
}

void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);

	mcs_reg_write(mcs, reg, plcy);

	if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
}

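/* Flow-id to SecY mapping: on RX only the SecY index and the control-packet
 * flag are programmed; on TX the SC index is additionally packed into the
 * same word at bits [15:9].
 */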
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		val |= (map->sc & 0x7F) << 9;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}

void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
{
	u64 reg, val;

	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
	} else {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
	}

	/* Enable/Disable the tcam entry */
	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
		}
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
		}
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	}
}

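/* The last TCAM and SecY entries (reserved via MCS_RSRC_RSVD_CNT) are
 * turned into a catch-all bypass rule: an all-ones TCAM mask matches every
 * packet, and the reserved SecY is programmed to pass traffic through
 * unmodified, so the datapath keeps working before any MACsec
 * configuration is installed.
 */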
int mcs_install_flowid_bypass_entry(struct mcs *mcs)
{
	int flow_id, secy_id, reg_id;
	struct secy_mem_map map;
	u64 reg, plcy = 0;

	/* Flow entry */
	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
	__set_bit(flow_id, mcs->tx.flow_ids.bmap);

	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	/* secy */
	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(secy_id, mcs->rx.secy.bmap);
	__set_bit(secy_id, mcs->tx.secy.bmap);

	/* Set validate frames to NULL and enable control port */
	plcy = 0x7ull;
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | 0x3ull << 4;
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);

	/* Enable control port and set mtu to max */
	plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);

	/* Map flowid to secy */
	map.secy = secy_id;
	map.ctrl_pkt = 0;
	map.flow_id = flow_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
	map.sc = secy_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);

	/* Enable Flowid entry */
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);

	return 0;
}

void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
{
	struct mcs_rsrc_map *map;
	int flow_id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear secy memory to zero */
	mcs_secy_plcy_write(mcs, 0, secy_id, dir);

	/* Disable the tcam entry using this secy */
	for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
		if (map->flowid2secy_map[flow_id] != secy_id)
			continue;
		mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
	}
}

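/* Control-packet rule indices are partitioned per rule type (see the
 * MCS_CTRLPKT_*_RULE_OFFSET users below), so @offset is the first index of
 * the caller's window and the bitmap search returns the first free slot at
 * or above it.
 */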
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
{
	int rsrc_id;

	if (!rsrc->bmap)
		return -EINVAL;

	rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
	if (rsrc_id >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, rsrc_id, 1);
	pf_map[rsrc_id] = pcifunc;

	return rsrc_id;
}

int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	u64 dis, reg;
	int id, rc;

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;

	if (req->all) {
		for (id = 0; id < map->ctrlpktrule.max; id++) {
			if (map->ctrlpktrule2pf_map[id] != pcifunc)
				continue;
			mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
			dis = mcs_reg_read(mcs, reg);
			dis &= ~BIT_ULL(id);
			mcs_reg_write(mcs, reg, dis);
		}
		return 0;
	}

	rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
	dis = mcs_reg_read(mcs, reg);
	dis &= ~BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, dis);

	return rc;
}

int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
{
	u64 reg, enb;
	u64 idx;

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		req->data0 &= GENMASK(15, 0);
		if (req->data0 != ETH_P_PAE)
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);

		mcs_reg_write(mcs, reg, req->data0);
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_DAX(idx);

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		req->data2 &= GENMASK(15, 0);
		if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
		    !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
		      MCSX_PEX_TX_SLAVE_RULE_MAC;

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	}

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;

	enb = mcs_reg_read(mcs, reg);
	enb |= BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, enb);

	return 0;
}

int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
{
	/* Check if the rsrc_id is mapped to PF/VF */
	if (pf_map[rsrc_id] != pcifunc)
		return -EINVAL;

	rvu_free_rsrc(rsrc, rsrc_id);
	pf_map[rsrc_id] = 0;
	return 0;
}

/* Free all the cam resources mapped to PF */
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* free tcam entries */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
			      id, pcifunc);
		mcs_ena_dis_flowid_entry(mcs, id, dir, false);
	}

	/* free secy entries */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->secy, map->secy2pf_map,
			      id, pcifunc);
		mcs_clear_secy_plcy(mcs, id, dir);
	}

	/* free sc entries */
	for (id = 0; id < map->secy.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);

		/* Disable SC CAM only on RX side */
		if (dir == MCS_RX)
			mcs_ena_dis_sc_cam_entry(mcs, id, false);
	}

	/* free sa entries */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
	}
	return 0;
}

int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
{
	int rsrc_id;

	rsrc_id = rvu_alloc_rsrc(rsrc);
	if (rsrc_id < 0)
		return -ENOMEM;
	pf_map[rsrc_id] = pcifunc;
	return rsrc_id;
}

int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*flow_id = id;

	id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*secy_id = id;

	id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sc_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa1_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa2_id = id;

	return 0;
}

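/* A TX packet number wrap (XPN reaching zero) is fatal for an SA, so walk
 * every in-use TX SC and notify the PF/VF owning whichever SA was active
 * when the wrap occurred.
 */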
static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was used and got expired */
			event.sa_id = (val >> 9) & 0xFF;
		else
			/* SA_index0 was used and got expired */
			event.sa_id = val & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val, status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	/* TX SA interrupt is raised only if autorekey is enabled.
	 * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
	 * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
	 * SA in SA_index1 got expired else SA in SA_index0 got expired.
	 */
	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		/* Auto rekey is enabled */
		if (!((val >> 18) & 0x1))
			continue;
		status = (val >> 21) & 0x1;

		/* Check if tx_sa_active status had changed */
		if (status == mcs->tx_sa_active[sc])
			continue;
		/* SA_index0 is expired */
		if (status)
			event.sa_id = val & 0xFF;
		else
			event.sa_id = (val >> 9) & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	int sa, reg;
	u64 intr;

	/* Check expired SAs */
	for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
		/* Bit high in *PN_THRESH_REACHEDX implies
		 * corresponding SAs are expired.
		 */
		intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
		for (sa = 0; sa < 64; sa++) {
			if (!(intr & BIT_ULL(sa)))
				continue;

			event.mcs_id = mcs->mcs_id;
			event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
			event.sa_id = sa + (reg * 64);
			event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
			mcs_add_intr_wq_entry(mcs, &event);
		}
	}
}

static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
		event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SL_GTE48)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
	if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
		event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
				 enum mcs_direction dir)
{
	u64 val, reg;
	int lmac;

	if (!(intr & 0x6ULL))
		return;

	if (intr & BIT_ULL(1))
		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
	else
		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
	val = mcs_reg_read(mcs, reg);

	/* policy/data overflow occurred */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		if (!(val & BIT_ULL(lmac)))
			continue;
		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
	}
}

void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
				 enum mcs_direction dir)
{
	int lmac;

	if (!(intr & 0xFFFFFULL))
		return;

	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		if (intr & BIT_ULL(lmac))
			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
	}
}

static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
{
	struct mcs *mcs = (struct mcs *)mcs_irq;
	u64 intr, cpm_intr, bbe_intr, pab_intr;

	/* Disable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));

	/* Check which block has interrupt */
	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);

	/* CPM RX */
	if (intr & MCS_CPM_RX_INT_ENA) {
		/* Check for PN thresh interrupt bit */
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);

		if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
			mcs_rx_pn_thresh_reached_handler(mcs);

		if (cpm_intr & MCS_CPM_RX_INT_ALL)
			mcs_rx_misc_intr_handler(mcs, cpm_intr);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
	}

	/* CPM TX */
	if (intr & MCS_CPM_TX_INT_ENA) {
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);

		if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
			else
				cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
		}

		if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
			mcs_tx_misc_intr_handler(mcs, cpm_intr);

		if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
			else
				cn10kb_mcs_tx_pn_wrapped_handler(mcs);
		}
		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
	}

	/* BBE RX */
	if (intr & MCS_BBE_RX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
	}

	/* BBE TX */
	if (intr & MCS_BBE_TX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
	}

	/* PAB RX */
	if (intr & MCS_PAB_RX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
	}

	/* PAB TX */
	if (intr & MCS_PAB_TX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
	}

	/* Clear and enable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	return IRQ_HANDLED;
}

static void *alloc_mem(struct mcs *mcs, int n)
{
	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
}

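/* All the *2pf_map tables below hold u16 PF/VF function numbers, hence the
 * fixed sizeof(u16) in alloc_mem(); devm_kcalloc() ties their lifetime to
 * the PCI device.
 */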
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
{
	struct hwinfo *hw = mcs->hw;
	int err;

	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2pf_map)
		return -ENOMEM;

	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
	if (!res->secy2pf_map)
		return -ENOMEM;

	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
	if (!res->sc2pf_map)
		return -ENOMEM;

	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
	if (!res->sa2pf_map)
		return -ENOMEM;

	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2secy_map)
		return -ENOMEM;

	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
	if (!res->ctrlpktrule2pf_map)
		return -ENOMEM;

	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->flow_ids);
	if (err)
		return err;

	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->secy);
	if (err)
		return err;

	res->sc.max = hw->sc_entries;
	err = rvu_alloc_bitmap(&res->sc);
	if (err)
		return err;

	res->sa.max = hw->sa_entries;
	err = rvu_alloc_bitmap(&res->sa);
	if (err)
		return err;

	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
	err = rvu_alloc_bitmap(&res->ctrlpktrule);
	if (err)
		return err;

	return 0;
}

static int mcs_register_interrupts(struct mcs *mcs)
{
	int ret = 0;

	mcs->num_vec = pci_msix_vec_count(mcs->pdev);

	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
				    mcs->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
			mcs->num_vec, ret);
		return ret;
	}

	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
	if (ret) {
		dev_err(mcs->dev, "MCS IP irq registration failed\n");
		goto exit;
	}

	/* MCS enable IP interrupts */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	/* Enable CPM Rx/Tx interrupts */
	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);

	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
	if (!mcs->tx_sa_active) {
		ret = -ENOMEM;
		goto free_irq;
	}

	return ret;

free_irq:
	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
exit:
	pci_free_irq_vectors(mcs->pdev);
	mcs->num_vec = 0;
	return ret;
}

int mcs_get_blkcnt(void)
{
	struct mcs *mcs;
	int idmax = -ENODEV;

	/* Check MCS block is present in hardware */
	if (!pci_dev_present(mcs_id_table))
		return 0;

	list_for_each_entry(mcs, &mcs_list, mcs_list)
		if (mcs->mcs_id > idmax)
			idmax = mcs->mcs_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

struct mcs *mcs_get_pdata(int mcs_id)
{
	struct mcs *mcs_dev;

	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
		if (mcs_dev->mcs_id == mcs_id)
			return mcs_dev;
	}
	return NULL;
}

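/* Typical lookup from a mailbox handler (usage sketch only, where the
 * mcs_id comes from the request):
 *
 *	struct mcs *mcs = mcs_get_pdata(req->mcs_id);
 *
 *	if (!mcs)
 *		return -EINVAL;
 */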
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
{
	u32 val = 0;

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
		      req->port_mode & MCS_PORT_MODE_MASK);

	req->cstm_tag_rel_mode_sel &= 0x3;

	if (mcs->hw->mcs_blks > 1) {
		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
		val = (u32)req->fifo_skid << 0x10;
		val |= req->fifo_skid;
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
			      req->cstm_tag_rel_mode_sel);
		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);

		if (req->custom_hdr_enb)
			val |= BIT_ULL(req->port_id);
		else
			val &= ~BIT_ULL(req->port_id);

		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
	} else {
		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
		val |= (req->cstm_tag_rel_mode_sel << 2);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
	}
}

void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp)
{
	u64 reg = 0;

	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
			 MCS_PORT_MODE_MASK;

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
			rsp->custom_hdr_enb = 1;
	} else {
		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
	}

	rsp->port_id = req->port_id;
	rsp->mcs_id = req->mcs_id;
}

void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	u64 reg = 0, val = 0;
	u8 idx;

	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
		if (mcs->hw->mcs_blks > 1)
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
			      MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
		else
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
			      MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);

		val = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
			      MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
		} else {
			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
			rsp->cstm_etype_en |= (val & 0x1) << idx;
		}
	}

	rsp->mcs_id = req->mcs_id;
	rsp->dir = req->dir;
}

void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
{
	u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);

	mcs_reg_write(mcs, reg, reset & 0x1);
}

/* Set lmac to bypass/operational mode */
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
	u64 reg;
	int id = lmac_id * 2;

	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
	mcs_reg_write(mcs, reg, (u64)mode);
	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
	mcs_reg_write(mcs, reg, (u64)mode);
}

void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
{
	u64 reg;

	if (pn->dir == MCS_RX)
		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
	else
		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;

	mcs_reg_write(mcs, reg, pn->threshold);
}

void cn10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN CTag (EtherType 0x8100) */
	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag (EtherType 0x88a8) */
	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);
}

static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
{
	u64 reg;

	/* Port mode 25GB */
	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
		mcs_reg_write(mcs, reg, 0xe000e);
		return;
	}

	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);
}

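/* Each LMAC gets a window of 16 channels: RANGE is programmed with
 * ilog2(16) and BASE advances by 16 per LMAC, making the channel-to-LMAC
 * mapping a simple base + offset decode.
 */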
int mcs_set_lmac_channels(int mcs_id, u16 base)
{
	struct mcs *mcs;
	int lmac;
	u64 cfg;

	mcs = mcs_get_pdata(mcs_id);
	if (!mcs)
		return -ENODEV;
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
		base += 16;
	}
	return 0;
}

static int mcs_x2p_calibration(struct mcs *mcs)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	int i, err = 0;
	u64 val;

	/* set X2P calibration */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	val |= BIT_ULL(5);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

	/* Wait for calibration to complete */
	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
		if (time_before(jiffies, timeout)) {
			usleep_range(80, 100);
			continue;
		} else {
			err = -EBUSY;
			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
			return err;
		}
	}

	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
		if (val & BIT_ULL(1 + i))
			continue;
		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
		err = -EBUSY;
	}
	/* Clear X2P calibrate */
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));

	return err;
}

static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
{
	u64 val;

	/* Set MCS to external bypass */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	if (bypass)
		val |= BIT_ULL(6);
	else
		val &= ~BIT_ULL(6);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}

static void mcs_global_cfg(struct mcs *mcs)
{
	/* Disable external bypass */
	mcs_set_external_bypass(mcs, false);

	/* Reset TX/RX stats memory */
	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);

	/* Set MCS to perform standard IEEE802.1AE macsec processing */
	if (mcs->hw->mcs_blks == 1) {
		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
		return;
	}

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
}

void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 128;		/* TCAM entries */
	hw->secy_entries = 128;		/* SecY entries */
	hw->sc_entries = 128;		/* SC CAM entries */
	hw->sa_entries = 256;		/* SA entries */
	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
	hw->mcs_blks = 1;		/* MCS blocks */
	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
}

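/* Ops for CN10KB silicon; the CNF10KB variants (mcs_blks > 1) come from
 * cnf10kb_get_mac_ops() and are selected in mcs_probe() by PCI subsystem
 * device ID.
 */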
static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_set_hw_capabilities	= cn10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg			= cn10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
};

static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int lmac, err = 0;
	struct mcs *mcs;

	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
	if (!mcs)
		return -ENOMEM;

	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
	if (!mcs->hw)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto exit;
	}

	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!mcs->reg_base) {
		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto exit;
	}

	pci_set_drvdata(pdev, mcs);
	mcs->pdev = pdev;
	mcs->dev = &pdev->dev;

	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
		mcs->mcs_ops = &cn10kb_mcs_ops;
	else
		mcs->mcs_ops = cnf10kb_get_mac_ops();

	/* Set hardware capabilities */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);

	mcs_global_cfg(mcs);

	/* Perform X2P calibration */
	err = mcs_x2p_calibration(mcs);
	if (err)
		goto err_x2p;

	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
			& MCS_ID_MASK;

	/* Set mcs tx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
	if (err)
		goto err_x2p;

	/* Set mcs rx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
	if (err)
		goto err_x2p;

	/* per port config */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
		mcs_lmac_init(mcs, lmac);

	/* Parser configuration */
	mcs->mcs_ops->mcs_parser_cfg(mcs);

	err = mcs_register_interrupts(mcs);
	if (err)
		goto exit;

	list_add(&mcs->mcs_list, &mcs_list);
	mutex_init(&mcs->stats_lock);

	return 0;

err_x2p:
	/* Enable external bypass */
	mcs_set_external_bypass(mcs, true);
exit:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void mcs_remove(struct pci_dev *pdev)
{
	struct mcs *mcs = pci_get_drvdata(pdev);

	/* Set MCS to external bypass */
	mcs_set_external_bypass(mcs, true);
	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver mcs_driver = {
	.name = DRV_NAME,
	.id_table = mcs_id_table,
	.probe = mcs_probe,
	.remove = mcs_remove,
};