// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/module.h>

#include "rvu_struct.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u16 pcifunc);
static const char *nix_get_ctx_name(int ctype);
enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128
struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}
int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}
static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;

	return idx;
}
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}

	return NULL;
}
int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}
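
/* Illustrative round-trip of the encoding above (not from the datasheet):
 * 8192 bytes is a power of two, so convert_bytes_to_dwrr_mtu(8192)
 * yields ilog2(8192) = 13 and convert_dwrr_mtu_to_bytes(13) returns
 * BIT_ULL(13) = 8192. The non-power-of-2 MTUs 9728 and 10240 round-trip
 * through the reserved encodings 4 and 5 instead.
 */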
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;

		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
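
/* Illustrative LBK pairing, derived from the tx_chan_base arithmetic
 * above: AF VF0 (vf = 0) receives on loopback channel 0 and transmits
 * towards channel 1, while AF VF1 receives on channel 1 and transmits
 * towards channel 0, so consecutive VFs form a back-to-back pair.
 */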
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
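
/* Worked example of the CGX BPID math above, using the ranges from the
 * comment (16 channels per LMAC, 4 LMACs per CGX): cgx(1)/lmac(0) with
 * chan_base 0 gives bpid = 1 * 4 * 16 + 0 * 16 + 0 = 64, the first BPID
 * of the "cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79)" range.
 */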
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assign to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = v4 ? 4 : 6;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}
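
/* For illustration, assuming fidx restarts at 0 for each format as
 * above: the TSOv4 format uses four field slots (IP length, IP ID, TCP
 * sequence, TCP flags) and TSOv6 uses three (no IP ID field), so the
 * NOP loops above clear the remaining of the 8 slots per format.
 */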
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
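
/* For illustration: with rss_sz = 256 and rss_grps = 8, num_indices is
 * 2048, group N starts at indirection-table offset 256 * N, and each
 * group encodes its size as ilog2(256) - 1 = 7 in the GRPX register.
 */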
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}
static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}
static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err       = 0;
	aq_req.cq_mask.wrptr        = 0;
	aq_req.cq_mask.tail         = 0;
	aq_req.cq_mask.head         = 0;
	aq_req.cq_mask.avg_level    = 0;
	aq_req.cq_mask.update_time  = 0;
	aq_req.cq_mask.substream    = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *	       cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}
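
/* Example of the masked compare above: if only cq.drop was written,
 * cq_mask carries 0xff in the drop field and zeroes elsewhere, so the
 * word-wise AND clears every other field in both aq_req.cq and
 * aq_rsp.cq and memcmp() effectively compares just the drop value.
 */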
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a work around perform CQ context read after each AQ write. If AQ
	 * read shows AQ write is not updated perform AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}
static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = req->npa_func;
	cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}
/* Handle shaper update specially for few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	default:
		return false;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}
static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) {/* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
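
/* Illustration of the fixed-mapping ranges above (hypothetical counts):
 * if nix_txsch_per_cgx_lmac were 12, CGX link 2 would own schedulers
 * [24, 36) at each level, and the SDP range would begin only after all
 * CGX and LBK link ranges.
 */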
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs doesn't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate contiguous queue indices requests first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
				   struct nix_smq_flush_ctx *smq_flush_ctx)
{
	struct nix_smq_tree_ctx *smq_tree_ctx;
	u64 parent_off, regval;
	u16 schq;
	int lvl;

	smq_flush_ctx->smq = smq;

	schq = smq;
	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		if (lvl == NIX_TXSCH_LVL_TL1) {
			smq_flush_ctx->tl1_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
			smq_tree_ctx->pir_off = 0;
			smq_tree_ctx->pir_val = 0;
			parent_off = 0;
		} else if (lvl == NIX_TXSCH_LVL_TL2) {
			smq_flush_ctx->tl2_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
			parent_off = NIX_AF_TL2X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL3) {
			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
			parent_off = NIX_AF_TL3X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL4) {
			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
			parent_off = NIX_AF_TL4X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
			parent_off = NIX_AF_MDQX_PARENT(schq);
		}
		/* save cir/pir register values */
		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
		if (smq_tree_ctx->pir_off)
			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);

		/* get parent txsch node */
		if (parent_off) {
			regval = rvu_read64(rvu, blkaddr, parent_off);
			schq = (regval >> 16) & 0x1FF;
		}
	}
}
static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u64 regoff;
	int tl2;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	/* loop through all TL2s with matching PF_FUNC */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
		/* skip the smq(flush) TL2 */
		if (tl2 == smq_flush_ctx->tl2_schq)
			continue;
		/* skip unused TL2s */
		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
			continue;
		/* skip if PF_FUNC doesn't match */
		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq]) &
		     ~RVU_PFVF_FUNC_MASK))
			continue;
		/* enable/disable XOFF */
		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
		if (enable)
			rvu_write64(rvu, blkaddr, regoff, 0x1);
		else
			rvu_write64(rvu, blkaddr, regoff, 0x0);
	}
}
static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	u64 cir_off, pir_off, cir_val, pir_val;
	struct nix_smq_tree_ctx *smq_tree_ctx;
	int lvl;

	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		cir_off = smq_tree_ctx->cir_off;
		cir_val = smq_tree_ctx->cir_val;
		pir_off = smq_tree_ctx->pir_off;
		pir_val = smq_tree_ctx->pir_val;

		if (enable) {
			rvu_write64(rvu, blkaddr, cir_off, cir_val);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, pir_val);
		} else {
			rvu_write64(rvu, blkaddr, cir_off, 0x0);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, 0x0);
		}
	}
}
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
			 int smq, u16 pcifunc, int nixlf)
{
	struct nix_smq_flush_ctx *smq_flush_ctx;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	if (!is_rvu_otx2(rvu)) {
		/* Skip SMQ flush if pkt count is zero */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
		if (!cfg)
			return 0;
	}

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
						   lmac_id, true);
	}

	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
	if (!smq_flush_ctx)
		return -ENOMEM;
	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_info(rvu->dev,
			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
			 nixlf, smq);

	/* clear XOFF on TL2s */
	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
	kfree(smq_flush_ctx);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
	return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];

		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
		}
	}
	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
			  nix_get_tx_link(rvu, pcifunc));

	/* On PF cleanup, clear cfg done flag as
	 * PF would have changed default config.
	 */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
		schq = nix_get_tx_link(rvu, pcifunc);
		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
		 * a VF might be using this TL1 queue
		 */
		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PFs
		 * and their VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		rc = NIX_AF_ERR_TLX_INVALID;
		goto err;
	}

	/* Clear SW_XOFF of this resource only.
	 * For SMQ level, all path XOFF's
	 * need to be made clear by user
	 */
	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);

	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ &&
	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
		rc = NIX_AF_SMQ_FLUSH_FAILED;
		goto err;
	}

	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);

	return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes; on newer silicons
	 * it's changed to a weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
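
/* Example encoding under the mask above (a sketch; the register macros live
 * in rvu_reg.h): NIX_AF_TL2X_CIR(5) yields a value whose low 16 bits are the
 * TL2 CIR register offset and whose bits [25:16] carry queue number 5, so
 * the handlers below recover the queue with TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT)
 * and the register base with 'reg & 0xFFFF'.
 */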
static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
			struct nix_txsch *txsch, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;
	u64 cfg;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
	lbk_link_start = hw->cgx_links;

	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with a NPC TX MCAM rule
		 */
		lbk_links = hw->lbk_links;
		while (lbk_links--)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							      lbk_link_start +
							      lbk_links), cfg);
	}
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct nix_txschq_config *rsp)
{
	u64 reg, val, regval, schq_regbase, val_mask;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (req->read)
		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;
		val_mask = req->regval_mask[idx];

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		val = rvu_read64(rvu, blkaddr, reg);
		regval = (val & val_mask) | (regval & ~val_mask);

		/* Handle shaping state toggle specially */
		if (hw->cap.nix_shaper_toggle_wait &&
		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						req->lvl, reg, regval))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, so split the register write:
		 * do the flush first and write the rest of the bits later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG Type 7 reserved for vf vlan */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}
static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u64 regval;
	int index;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

	regval = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);

	return index;
}
static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
			     struct nix_vtag_config *req)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	u16 pcifunc = req->hdr.pcifunc;
	int idx0 = req->tx.vtag0_idx;
	int idx1 = req->tx.vtag1_idx;
	struct nix_txvlan *vlan;
	int err = 0;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.free_vtag0 && req->tx.free_vtag1)
		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
		    vlan->entry2pfvf_map[idx1] != pcifunc)
			return NIX_AF_ERR_PARAM;

	mutex_lock(&vlan->rsrc_lock);

	if (req->tx.free_vtag0) {
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
		if (err)
			goto exit;
	}

	if (req->tx.free_vtag1)
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);

exit:
	mutex_unlock(&vlan->rsrc_lock);
	return err;
}
static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
			   struct nix_vtag_config *req,
			   struct nix_vtag_config_rsp *rsp)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u16 pcifunc = req->hdr.pcifunc;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.cfg_vtag0) {
		rsp->vtag0_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag0, req->vtag_size);

		if (rsp->vtag0_idx < 0)
			return NIX_AF_ERR_TX_VTAG_NOSPC;

		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
	}

	if (req->tx.cfg_vtag1) {
		rsp->vtag1_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag1, req->vtag_size);

		if (rsp->vtag1_idx < 0)
			goto err_free;

		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
	}

	return 0;

err_free:
	if (req->tx.cfg_vtag0)
		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);

	return NIX_AF_ERR_TX_VTAG_NOSPC;
}
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct nix_vtag_config_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		/* rx vtag configuration */
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* tx vtag configuration */
		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
		    (req->tx.free_vtag0 || req->tx.free_vtag1))
			return NIX_AF_ERR_PARAM;

		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);

		if (req->tx.free_vtag0 || req->tx.free_vtag1)
			return nix_tx_vtag_decfg(rvu, blkaddr, req);
	}

	return 0;
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Use RSS with RSS index 0 */
	aq_req.mce.op = 1;
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}
static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
				     u16 pcifunc, bool add)
{
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Scan through the current list */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		/* If already exists, then delete */
		if (mce->pcifunc == pcifunc && !add) {
			delete = true;
			break;
		} else if (mce->pcifunc == pcifunc && add) {
			/* entry already exists */
			return 0;
		}
		tail = mce;
	}

	if (delete) {
		hlist_del(&mce->node);
		kfree(mce);
		mce_list->count--;
		return 0;
	}

	if (!add)
		return 0;

	/* Add a new one to the list, at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	if (!mce)
		return -ENOMEM;
	mce->pcifunc = pcifunc;
	if (!tail)
		hlist_add_head(&mce->node, &mce_list->head);
	else
		hlist_add_behind(&mce->node, &tail->node);
	mce_list->count++;
	return 0;
}
int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
			struct nix_mce_list *mce_list,
			int mce_idx, int mcam_index, bool add)
{
	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct mce *mce;

	if (!mce_list)
		return -EINVAL;

	/* Get this PF/VF func's MCE index */
	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	if (idx > (mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);
		return -EINVAL;
	}

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast = &nix_hw->mcast;
	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
	if (err)
		goto end;

	/* Disable MCAM entry in NPC */
	if (!mce_list->count) {
		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
		goto end;
	}

	/* Dump the updated list to HW */
	idx = mce_idx;
	last_idx = idx + mce_list->count - 1;
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					(next_idx > last_idx) ? true : false);
		if (err)
			goto end;
		idx++;
	}

end:
	mutex_unlock(&mcast->mce_lock);
	return err;
}
void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
		      struct nix_mce_list **mce_list, int *mce_idx)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;

	if (!hw->cap.nix_rx_multicast ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
		*mce_list = NULL;
		*mce_idx = 0;
		return;
	}

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	if (type == NIXLF_BCAST_ENTRY) {
		*mce_list = &pfvf->bcast_mce_list;
		*mce_idx = pfvf->bcast_mce_idx;
	} else if (type == NIXLF_ALLMULTI_ENTRY) {
		*mce_list = &pfvf->mcast_mce_list;
		*mce_idx = pfvf->mcast_mce_idx;
	} else if (type == NIXLF_PROMISC_ENTRY) {
		*mce_list = &pfvf->promisc_mce_list;
		*mce_idx = pfvf->promisc_mce_idx;
	} else {
		*mce_list = NULL;
		*mce_idx = 0;
	}
}
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add)
{
	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_mce_list *mce_list;
	int pf;

	/* skip multicast pkt replication for AF's VFs & SDP links */
	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
		return 0;

	if (!hw->cap.nix_rx_multicast)
		return 0;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return -EINVAL;

	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);

	mcam_index = npc_get_nixlf_mcam_index(mcam,
					      pcifunc & ~RVU_PFVF_FUNC_MASK,
					      nixlf, type);
	err = nix_update_mce_list(rvu, pcifunc, mce_list,
				  mce_idx, mcam_index, add);
	return err;
}
static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 cfg;

	/* Skip PF0 (i.e AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;
		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];

		/* This NIX0/1 block mapped to PF ? */
		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
			continue;

		/* save start idx of broadcast mce list */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		/* save start idx of multicast mce list */
		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);

		/* save the start idx of promisc mce list */
		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->bcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to multicast mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->mcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to promisc mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->promisc_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}
static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;
	int err, size;

	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_mce_tables(rvu, nix_hw);
}
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	int err;

	/* Allocate resource bitmap for tx vtag def registers */
	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
	err = rvu_alloc_bitmap(&vlan->rsrc);
	if (err)
		return -ENOMEM;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
					    sizeof(u16), GFP_KERNEL);
	if (!vlan->entry2pfvf_map)
		goto free_mem;

	mutex_init(&vlan->rsrc_lock);
	return 0;

free_mem:
	kfree(vlan->rsrc.bmap);
	return -ENOMEM;
}
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}

	/* Setup a default value of 8192 as DWRR MTU */
	if (rvu->hw->cap.nix_common_dwrr_mtu ||
	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
			    convert_bytes_to_dwrr_mtu(8192));
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
			    convert_bytes_to_dwrr_mtu(8192));
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
			    convert_bytes_to_dwrr_mtu(8192));
	}

	return 0;
}
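
/* Worked example for the DWRR default above: convert_bytes_to_dwrr_mtu()
 * (rvu.h) appears to store the MTU as a log2 value, so 8192 bytes would be
 * programmed as ilog2(8192) = 13, with convert_dwrr_mtu_to_bytes() mapping
 * it back via BIT_ULL(13) = 8192.
 */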
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int fmt_idx;

	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
			return fmt_idx;
	}
	if (fmt_idx >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;
	return fmt_idx;
}
static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
				    int blkaddr)
{
	u64 cfgs[] = {
		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
	};
	int i, rc;
	u64 total;

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
					       GFP_KERNEL);
	if (!nix_hw->mark_format.cfg)
		return -ENOMEM;
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				i, rc);
	}

	return 0;
}
static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* CN10K supports LBK FIFO size 72 KB */
	if (rvu->hw->lbk_bufsize == 0x12000)
		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	int fifo_size = rvu_cgx_get_fifolen(rvu);

	/* RPM supports FIFO len 128 KB and RPM2 supports double the
	 * FIFO len to accommodate 8 LMACS
	 */
	if (fifo_size == 0x20000 || fifo_size == 0x40000)
		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}
int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
				     struct nix_hw_info *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u64 dwrr_mtu;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);

	rsp->min_mtu = NIC_HW_MIN_FRS;

	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
		/* Return '1' on OTx2 */
		rsp->rpm_dwrr_mtu = 1;
		rsp->sdp_dwrr_mtu = 1;
		rsp->lbk_dwrr_mtu = 1;
		return 0;
	}

	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}
/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;
	u32 l3_l4_src_dst;
	int l4_key_offset = 0;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG	5
#define MAX_KEY_OFF	40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */

	/* Last 4 bits (31:28) are reserved to specify SRC, DST
	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
	 */
	l3_l4_src_dst = flow_cfg;
	/* Reset these 4 bits, so that these won't be part of key */
	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;

	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	for (idx = 0;
	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	     key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
		if (field_marker)
			memset(&tmp, 0, sizeof(tmp));

		field_marker = true;
		keyoff_marker = true;
		switch (key_type) {
		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field->bytesm1 = 1;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 9; /* offset */
			field->bytesm1 = 0; /* 1 byte */
			field->ltype_match = NPC_LT_LC_IP;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4:
		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP;
			}
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */

			/* Only SIP */
			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
				field->bytesm1 = 3; /* SIP, 4 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
				/* Both SIP + DIP */
				if (field->bytesm1 == 3) {
					field->bytesm1 = 7; /* SIP + DIP, 8B */
				} else {
					/* Only DIP */
					field->hdr_offset = 16; /* DIP off */
					field->bytesm1 = 3; /* DIP, 4 bytes */
				}
			}

			field->ltype_mask = 0xF; /* Match only IPv4 */
			keyoff_marker = false;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6:
		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP6;
			}
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */

			/* Only SIP */
			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
				field->bytesm1 = 15; /* SIP, 16 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
				/* Both SIP + DIP */
				if (field->bytesm1 == 15) {
					/* SIP + DIP, 32 bytes */
					field->bytesm1 = 31;
				} else {
					/* Only DIP */
					field->hdr_offset = 24; /* DIP off */
					field->bytesm1 = 15; /* DIP, 16 bytes */
				}
			}
			field->ltype_mask = 0xF; /* Match only IPv6 */
			break;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
		case NIX_FLOW_KEY_TYPE_INNR_TCP:
		case NIX_FLOW_KEY_TYPE_INNR_UDP:
		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
			field->lid = NPC_LID_LD;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
				field->lid = NPC_LID_LH;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
				field->bytesm1 = 1; /* SRC, 2 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
				/* Both SRC + DST */
				if (field->bytesm1 == 1) {
					/* SRC + DST, 4 bytes */
					field->bytesm1 = 3;
				} else {
					/* Only DST */
					field->hdr_offset = 2; /* DST off */
					field->bytesm1 = 1; /* DST, 2 bytes */
				}
			}

			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
			 * so no need to change the ltype_match, just change
			 * the lid for inner protocols
			 */
			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
				     (int)NPC_LT_LH_TU_TCP);
			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
				     (int)NPC_LT_LH_TU_UDP);
			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
				     (int)NPC_LT_LH_TU_SCTP);

			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
			    valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			}
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group item
				 * is enabled in the group but not the final one
				 */
				if (group_member) {
					valid_key = true;
					group_member = false;
				}
			} else {
				field_marker = false;
				keyoff_marker = false;
			}

			/* TCP/UDP/SCTP and ESP/AH falls at same offset so
			 * remember the TCP key offset of 40 byte hash key.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
				l4_key_offset = key_off;
			break;
		case NIX_FLOW_KEY_TYPE_NVGRE:
			field->lid = NPC_LID_LD;
			field->hdr_offset = 4; /* VSID offset */
			field->bytesm1 = 2;
			field->ltype_match = NPC_LT_LD_NVGRE;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VXLAN:
		case NIX_FLOW_KEY_TYPE_GENEVE:
			field->lid = NPC_LID_LE;
			field->bytesm1 = 2;
			field->hdr_offset = 4;
			field->ltype_mask = 0xF;
			field_marker = false;
			keyoff_marker = false;

			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
				field->ltype_match |= NPC_LT_LE_VXLAN;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
				field->ltype_match |= NPC_LT_LE_GENEVE;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
				if (group_member) {
					field->ltype_mask = ~field->ltype_match;
					field_marker = true;
					keyoff_marker = true;
					valid_key = true;
					group_member = false;
				}
			}
			break;
		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
			field->lid = NPC_LID_LA;
			field->ltype_match = NPC_LT_LA_ETHER;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
				field->lid = NPC_LID_LF;
				field->ltype_match = NPC_LT_LF_TU_ETHER;
			}
			field->hdr_offset = 0;
			field->bytesm1 = 5; /* DMAC, 6 bytes */
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 byte ext hdr */
			field->ltype_match = NPC_LT_LC_IP6_EXT;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_GTPU:
			field->lid = NPC_LID_LE;
			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
			field->ltype_match = NPC_LT_LE_GTPU;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VLAN:
			field->lid = NPC_LID_LB;
			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
			field->bytesm1 = 1; /* 2 bytes (actually 12 bits) */
			field->ltype_match = NPC_LT_LB_CTAG;
			field->ltype_mask = 0xF;
			field->fn_mask = 1; /* Mask out the first nibble */
			break;
		case NIX_FLOW_KEY_TYPE_AH:
		case NIX_FLOW_KEY_TYPE_ESP:
			field->hdr_offset = 0;
			field->bytesm1 = 7; /* SPI + sequence number */
			field->ltype_mask = 0xF;
			field->lid = NPC_LID_LE;
			field->ltype_match = NPC_LT_LE_ESP;
			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
				field->lid = NPC_LID_LD;
				field->ltype_match = NPC_LT_LD_AH;
				field->hdr_offset = 4;
				keyoff_marker = false;
			}
			break;
		}
		field->ena = 1;

		/* Found a valid flow key type */
		if (valid_key) {
			/* Use the key offset of TCP/UDP/SCTP fields
			 * for ESP/AH fields.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* No room to add a new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}
static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	return 0;
}
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp)
{
	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* untrusted VF can't overwrite admin(PF) changes */
	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
		dev_warn(rvu->dev,
			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
		return -EPERM;
	}

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);

	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, req->mac_addr);

	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
		ether_addr_copy(pfvf->default_mac, req->mac_addr);

	rvu_switch_update_rules(rvu, pcifunc);

	return 0;
}
int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
				      struct msg_req *req,
				      struct nix_get_mac_addr_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	if (!is_nixlf_attached(rvu, pcifunc))
		return NIX_AF_ERR_AF_LF_INVALID;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	return 0;
}
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp)
{
	bool allmulti, promisc, nix_rx_multicast;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;

	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;

	if (is_vf(pcifunc) && !nix_rx_multicast &&
	    (promisc || allmulti)) {
		dev_warn_ratelimited(rvu->dev,
				     "VF promisc/multicast not supported\n");
		return 0;
	}

	/* untrusted VF can't configure promisc/allmulti */
	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
	    (promisc || allmulti))
		return 0;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	if (nix_rx_multicast) {
		/* add/del this PF_FUNC to/from mcast pkt replication list */
		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
					  allmulti);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update pcifunc 0x%x to multicast list\n",
				pcifunc);
			return err;
		}

		/* add/del this PF_FUNC to/from promisc pkt replication list */
		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
					  promisc);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update pcifunc 0x%x to promisc list\n",
				pcifunc);
			return err;
		}
	}

	/* install/uninstall allmulti entry */
	if (allmulti) {
		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
					       pfvf->rx_chan_base);
	} else {
		if (!nix_rx_multicast)
			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
	}

	/* install/uninstall promisc entry */
	if (promisc)
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
	else if (!nix_rx_multicast)
		rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);

	return 0;
}
static void nix_find_link_frs(struct rvu *rvu,
			      struct nix_frs_cfg *req, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct rvu_pfvf *pfvf;
	int maxlen, minlen;
	int numvfs, hwvf;
	int vf;

	/* Update with requester's min/max lengths */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->maxlen = req->maxlen;
	if (req->update_minlen)
		pfvf->minlen = req->minlen;

	maxlen = req->maxlen;
	minlen = req->update_minlen ? req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* For each VF, compare requested max/minlen */
	for (vf = 0; vf < numvfs; vf++) {
		pfvf = &rvu->hwvf[hwvf + vf];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;
	}

	/* Compare requested max/minlen with PF's max/minlen */
	pfvf = &rvu->pf[pf];
	if (pfvf->maxlen > maxlen)
		maxlen = pfvf->maxlen;
	if (req->update_minlen &&
	    pfvf->minlen && pfvf->minlen < minlen)
		minlen = pfvf->minlen;

	/* Update the request with the max/min of the PF and its VFs */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
}
static int
nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
			u16 pcifunc, u64 tx_credits)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	unsigned long poll_tmo;
	bool restore_tx_en = 0;
	struct nix_hw *nix_hw;
	u64 cfg, sw_xoff = 0;
	u32 schq = 0;
	u32 credits;
	int rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (tx_credits == nix_hw->tx_credits[link])
		return 0;

	/* Enable cgx tx if disabled for credits to be back */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
						   lmac_id, true);
	}

	mutex_lock(&rvu->rsrc_lock);
	/* Disable new traffic to link */
	if (hw->cap.nix_shaping) {
		schq = nix_get_tx_link(rvu, pcifunc);
		sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
	}

	rc = NIX_AF_ERR_LINK_CREDITS;
	poll_tmo = jiffies + usecs_to_jiffies(200000);
	/* Wait for credits to return */
	do {
		if (time_after(jiffies, poll_tmo))
			goto exit;
		usleep_range(100, 200);

		cfg = rvu_read64(rvu, blkaddr,
				 NIX_AF_TX_LINKX_NORM_CREDIT(link));
		credits = (cfg >> 12) & 0xFFFFFULL;
	} while (credits != nix_hw->tx_credits[link]);

	cfg &= ~(0xFFFFFULL << 12);
	cfg |= (tx_credits << 12);
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
	rc = 0;

	nix_hw->tx_credits[link] = tx_credits;

exit:
	/* Enable traffic back */
	if (hw->cap.nix_shaping && !sw_xoff)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);

	/* Restore state of cgx tx */
	if (restore_tx_en)
		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);

	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	u8 cgx = 0, lmac = 0;
	u16 max_mtu;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &max_mtu);

	if (!req->sdp_link && req->maxlen > max_mtu)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if requester wants to update SMQ's */
	if (!req->update_smq)
		goto rx_frscfg;

	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	mutex_lock(&rvu->rsrc_lock);
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
		if (req->update_minlen)
			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
	}
	mutex_unlock(&rvu->rsrc_lock);

rx_frscfg:
	/* Check if config is for SDP link */
	if (req->sdp_link) {
		if (!hw->sdp_links)
			return NIX_AF_ERR_RX_LINK_INVALID;
		link = hw->cgx_links + hw->lbk_links;
		goto linkcfg;
	}

	/* Check if the request is from CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		link = hw->cgx_links + pfvf->lbkid;
	}

	if (link < 0)
		return NIX_AF_ERR_RX_LINK_INVALID;

linkcfg:
	nix_find_link_frs(rvu, req, pcifunc);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	if (req->sdp_link || pf == 0)
		return 0;

	/* Update transmit credits for CGX links */
	lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
	if (!lmac_fifo_len) {
		dev_err(rvu->dev,
			"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
			__func__, cgx, lmac);
		return 0;
	}
	return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
				       (lmac_fifo_len - req->maxlen) / 16);
}
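
/* Worked example for the credit conversion above (numbers illustrative):
 * with a 48 KB (49152 byte) LMAC FIFO and a requested maxlen of 1518
 * bytes, (49152 - 1518) / 16 = 2977 credits. One max-sized frame is held
 * back as FIFO headroom and the divide by 16 converts bytes into the
 * hardware's 16-byte credit units.
 */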

int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
				    struct msg_rsp *rsp)
{
	int nixlf, blkaddr, err;
	u64 cfg;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
	/* Set the interface configuration */
	if (req->len_verify & BIT(0))
		cfg |= BIT_ULL(41);
	else
		cfg &= ~BIT_ULL(41);

	if (req->len_verify & BIT(1))
		cfg |= BIT_ULL(40);
	else
		cfg &= ~BIT_ULL(40);

	if (req->len_verify & NIX_RX_DROP_RE)
		cfg |= BIT_ULL(32);
	else
		cfg &= ~BIT_ULL(32);

	if (req->csum_verify & BIT(0))
		cfg |= BIT_ULL(37);
	else
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);

	return 0;
}
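
/* Summary of the NIX_AF_LFX_RX_CFG bits toggled above, as implied by the
 * handler itself (descriptive readings, not register documentation):
 *   req->len_verify BIT(0)         -> bit 41: outer L3 length check
 *   req->len_verify BIT(1)         -> bit 40: outer L4 length check
 *   req->len_verify NIX_RX_DROP_RE -> bit 32: drop pkts with rcv errors
 *   req->csum_verify BIT(0)        -> bit 37: outer L4 checksum check
 */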

static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}

static void nix_link_config(struct rvu *rvu, int blkaddr,
			    struct nix_hw *nix_hw)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;
	u16 lbk_max_frs, lmac_max_frs;
	unsigned long lmac_bmap;
	u64 tx_credits, cfg;
	u64 lmac_fifo_len;
	int iter;

	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With the HW reset minlen value of 60 bytes, HW treats ARP pkts
	 * as undersize and reports them to SW as error pkts, hence
	 * setting it to 40 bytes.
	 */
	for (link = 0; link < hw->cgx_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
	}

	for (link = hw->cgx_links; link < hw->lbk_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
	}
	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	/* Set credits for Tx links assuming max packet length allowed.
	 * This will be reconfigured based on MTU set for PF/VF.
	 */
	for (cgx = 0; cgx < hw->cgx; cgx++) {
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		/* Skip when cgx is not available or lmac cnt is zero */
		if (lmac_cnt <= 0)
			continue;
		slink = cgx * hw->lmac_per_cgx;

		/* Get LMAC id's from bitmap */
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
			if (!lmac_fifo_len) {
				dev_err(rvu->dev,
					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
					__func__, cgx, iter);
				continue;
			}
			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
			/* Enable credits and set credit pkt count to max allowed */
			cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);

			link = iter + slink;
			nix_hw->tx_credits[link] = tx_credits;
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
		}
	}

	/* Set Tx credits for LBK link */
	slink = hw->cgx_links;
	for (link = slink; link < (slink + hw->lbk_links); link++) {
		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
		nix_hw->tx_credits[link] = tx_credits;
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
	}
}
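
/* Layout of NIX_AF_TX_LINKX_NORM_CREDIT as programmed above (a sketch
 * read back from this code, not a register reference):
 *   bit  [1]     - credit enable (BIT_ULL(1))
 *   bits [10:2]  - packet credit count, set to the 0x1FF maximum
 *   bits [31:12] - byte credits in 16-byte units; e.g. a value of 2977
 *                  credits corresponds to 2977 * 16 = 47632 bytes.
 */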

static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);

	return 0;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 hw_const;

	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* On OcteonTx2 the DWRR quantum is directly configured into each of
	 * the transmit scheduler queues, and PF/VF drivers were free to
	 * configure any value up to 2^24.
	 * On CN10K, HW is modified so that the quantum configuration at
	 * scheduler queues is in terms of weight, and SW needs to set up a
	 * base DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW
	 * will do 'DWRR MTU * weight' to get the quantum.
	 *
	 * Check if HW uses a common MTU for all DWRR quantum configs.
	 * On OcteonTx2 this register field is '0'.
	 */
	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
		hw->cap.nix_common_dwrr_mtu = true;

	if (hw_const & BIT_ULL(61))
		hw->cap.nix_multiple_dwrr_mtu = true;
}
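
/* Worked example of the CN10K scheme above (numbers illustrative): with a
 * base DWRR MTU of 1500 programmed at NIX_AF_DWRR_RPM_MTU and a scheduler
 * queue weight of 16, HW computes a quantum of 1500 * 16 = 24000 bytes
 * per DWRR round. On OcteonTx2 the same 24000 would instead be written
 * directly as the quantum (any value up to 2^24).
 */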

static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);

		/* Set chan/link to backpressure TL3 instead of TL2 */
		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and transmitting pkts at the
		 * same time.
		 */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
		cfg &= ~BIT_ULL(15);
		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
	}

	ltdefs = rvu->kpu.lt_def;
	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Setup capabilities of the NIX block */
	rvu_nix_setup_capabilities(rvu, blkaddr);

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);

	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
	cfg |= 1ULL;
	if (!is_rvu_otx2(rvu))
		cfg |= NIX_PTP_1STEP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);

	if (!is_rvu_otx2(rvu))
		rvu_nix_block_cn10k_init(rvu, nix_hw);

	if (is_block_implemented(hw, blkaddr)) {
		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_txvlan(rvu, nix_hw);
		if (err)
			return err;

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps HW protocol checker to identify headers
		 * and validate length and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		if (!is_rvu_otx2(rvu)) {
			/* Enable APAD calculation for other protocols
			 * matching APAD0 and APAD1 lt def registers.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
				    (ltdefs->rx_apad0.valid << 11) |
				    (ltdefs->rx_apad0.lid << 8) |
				    (ltdefs->rx_apad0.ltype_match << 4) |
				    ltdefs->rx_apad0.ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
				    (ltdefs->rx_apad1.valid << 11) |
				    (ltdefs->rx_apad1.lid << 8) |
				    (ltdefs->rx_apad1.ltype_match << 4) |
				    ltdefs->rx_apad1.ltype_mask);

			/* Receive ethertype definition register defines layer
			 * information in NPC_RESULT_S to identify the Ethertype
			 * location in L2 header. Used for Ethertype overwriting
			 * in inline IPsec flow.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
				    (ltdefs->rx_et[0].offset << 12) |
				    (ltdefs->rx_et[0].valid << 11) |
				    (ltdefs->rx_et[0].lid << 8) |
				    (ltdefs->rx_et[0].ltype_match << 4) |
				    ltdefs->rx_et[0].ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
				    (ltdefs->rx_et[1].offset << 12) |
				    (ltdefs->rx_et[1].valid << 11) |
				    (ltdefs->rx_et[1].lid << 8) |
				    (ltdefs->rx_et[1].ltype_match << 4) |
				    ltdefs->rx_et[1].ltype_mask);
		}

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
		if (!nix_hw->tx_credits)
			return -ENOMEM;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr, nix_hw);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}

	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_switch_update_rules(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}

#define RX_SA_BASE	GENMASK_ULL(52, 7)

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	u64 sa_base;
	void *cgxd;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	/* reset HW config done for Switch headers */
	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);

	/* Disabling CGX and NPC config done for PTP */
	if (pfvf->hw_rx_tstamp_en) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		cgxd = rvu_cgx_pdata(cgx_id, rvu);
		mac_ops = get_mac_ops(cgxd);
		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
		/* Undo NPC config done for PTP */
		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
			dev_err(rvu->dev, "NPC config for PTP failed\n");
		pfvf->hw_rx_tstamp_en = false;
	}

	/* reset priority flow control config */
	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);

	/* reset 802.3x flow control config */
	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);

	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
	if (FIELD_GET(RX_SA_BASE, sa_base)) {
		err = rvu_cpt_ctx_flush(rvu, pcifunc);
		if (err)
			dev_err(rvu->dev,
				"CPT ctx flush failed with error: %d\n", err);
	}
}

#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf, nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}
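
/* Sketch of the dedup above: an existing LSO format is reused only when
 * all NIX_LSO_FIELD_MAX fields match under req->field_mask, so PF/VFs
 * requesting identical header-edit sequences share one of the limited
 * nix_hw->lso.total format slots instead of consuming two.
 */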

#define IPSEC_GEN_CFG_EGRP	GENMASK_ULL(50, 48)
#define IPSEC_GEN_CFG_OPCODE	GENMASK_ULL(47, 32)
#define IPSEC_GEN_CFG_PARAM1	GENMASK_ULL(31, 16)
#define IPSEC_GEN_CFG_PARAM2	GENMASK_ULL(15, 0)

#define CPT_INST_QSEL_BLOCK	GENMASK_ULL(28, 24)
#define CPT_INST_QSEL_PF_FUNC	GENMASK_ULL(23, 8)
#define CPT_INST_QSEL_SLOT	GENMASK_ULL(7, 0)

#define CPT_INST_CREDIT_TH	GENMASK_ULL(53, 32)
#define CPT_INST_CREDIT_BPID	GENMASK_ULL(30, 22)
#define CPT_INST_CREDIT_CNT	GENMASK_ULL(21, 0)
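
/* These masks feed the <linux/bitfield.h> helpers used below; e.g., with
 * hypothetical values:
 *
 *	u64 val = FIELD_PREP(IPSEC_GEN_CFG_EGRP, 1) |
 *		  FIELD_PREP(IPSEC_GEN_CFG_OPCODE, 0x15);
 *	u16 opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); // -> 0x15
 *
 * FIELD_PREP() shifts a value into the mask's bit range and FIELD_GET()
 * extracts it back out.
 */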

static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
				 int blkaddr)
{
	u8 cpt_idx, cpt_blkaddr;
	u64 val;

	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	if (req->enable) {
		val = 0;
		/* Enable context prefetching */
		if (!is_rvu_otx2(rvu))
			val |= BIT_ULL(51);

		/* Set OPCODE and EGRP */
		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);

		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);

		/* Set CPT queue for inline IPSec */
		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
				  req->inst_qsel.cpt_pf_func);

		if (!is_rvu_otx2(rvu)) {
			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
						       BLKADDR_CPT1;
			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
		}

		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    val);

		/* Set CPT credit */
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);

		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
	} else {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    0x0);
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);
	}
}

int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
					  struct nix_inline_ipsec_cfg *req,
					  struct msg_rsp *rsp)
{
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);

	return 0;
}

int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
					       struct msg_req *req,
					       struct nix_inline_ipsec_cfg *rsp)
{
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);

	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);

	return 0;
}

int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
					     struct nix_inline_ipsec_lf_cfg *req,
					     struct msg_rsp *rsp)
{
	int lf, blkaddr, err;
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
	if (err)
		return err;

	if (req->enable) {
		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
		val = (u64)req->ipsec_cfg0.tt << 44 |
		      (u64)req->ipsec_cfg0.tag_const << 20 |
		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
		      req->ipsec_cfg0.lenm1_max;

		if (blkaddr == BLKADDR_NIX1)
			val |= BIT_ULL(46);

		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);

		/* Set SA_IDX_W and SA_IDX_MAX */
		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
		      req->ipsec_cfg1.sa_idx_max;
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);

		/* Set SA base address */
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    req->sa_base_addr);
	} else {
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    0x0);
	}

	return 0;
}

void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* overwrite vf mac address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}

/* NIX ingress policers or bandwidth profiles APIs */
static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
{
	struct npc_lt_def_cfg defs, *ltdefs;

	ltdefs = &defs;
	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));

	/* Extract PCP and DEI fields from outer VLAN from byte offset
	 * 2 from the start of LB_PTR (ie TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
}

static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
				    int layer, int prof_idx)
{
	struct nix_cn10k_aq_enq_req aq_req;
	int rc;

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));

	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_INIT;

	/* Context is all zeros, submit to AQ */
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);
	return rc;
}

static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0XFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
			break;
		}

		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* There is no enable bit in the profile context,
			 * so no context disable. So let's INIT them here
			 * so that PF/VF later on have to just do WRITE to
			 * setup policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles, this will be needed for leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}

static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_ipolicer *ipolicer;
	int layer;

	if (!rvu->hw->cap.ipolicer)
		return;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];

		if (!ipolicer->band_prof.max)
			continue;

		kfree(ipolicer->band_prof.bmap);
	}
}

static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC or not
	 * with the exception of AF. AF is allowed to read and update contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to higher layer profile then check
	 * if that profile is also allocated to the requesting PCIFUNC
	 * or not.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* Leaf layer profile can link only to mid layer and
	 * mid layer to top layer.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}
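
/* Linkage rules enforced above, pictured:
 *
 *	leaf profile --hl_en--> mid profile --hl_en--> top profile
 *
 * A profile may only link one layer up, and both ends of a link must be
 * owned by the same PCIFUNC; AF (pcifunc 0) bypasses the ownership test.
 */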

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer prof index and map the leaf_prof index
		 * to it as well, so that flows that are being steered
		 * to different RQs but marked with the same match_id
		 * are rate limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			return rc;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);

		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
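
/* Resulting topology, sketched for two RQs steered to different queues
 * but marked with the same match_id:
 *
 *	RQ0 -> leaf prof A \
 *	                    > mid prof M (ref_count = 2) -> shared rate
 *	RQ1 -> leaf prof B /
 *
 * Both leaves point at one mid profile, so the two flows effectively
 * share a single policer bucket and are limited as one aggregate.
 */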

/* Called with mutex rsrc_lock */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free the mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}

int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Set the policer timeunit in nanosec */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}