// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u32 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE	BIT(0)
#define PFC_FLOWCTRL_RULE	BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

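/* Release all MCAM entries that were allocated for ntuple filters back to
 * the admin function (AF), one mailbox request per entry, then reset the
 * ntuple bookkeeping in flow_cfg.
 */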
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

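/* (Re)allocate @count non-contiguous MCAM entries for ntuple filters.
 * Existing ntuple entries are freed first; new ones are requested from
 * the AF in batches of at most NPC_MAX_NONCONTIG_ENTRIES and sorted so
 * that filter locations map to MCAM entries in ascending order. Returns
 * the number of entries actually allocated, which may be less than
 * @count.
 */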
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* In a single request only a max of NPC_MAX_NONCONTIG_ENTRIES
	 * MCAM entries can be allocated.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order;
	 * otherwise the user-installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

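/* Allocate the default MCAM entries used for VF VLAN, unicast and Rx VLAN
 * offload filters, record their offsets in flow_cfg, query whether the
 * active mkex profile extracts NPC_DMAC before advertising VLAN support,
 * and finally allocate OTX2_DEFAULT_FLOWCOUNT entries for ntuple filters.
 */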
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_get_field_status_req *freq;
	struct npc_get_field_status_rsp *frsp;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
		(&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;

	/* Check if NPC_DMAC field is supported
	 * by the mkex profile before setting VLAN support flag.
	 */
	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
	if (!freq) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	freq->field = NPC_DMAC;
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
		(&pfvf->mbox.mbox, 0, &freq->hdr);

	if (frsp->enable) {
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)

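/* VF variant of flow init: allocate flow_cfg and the DMAC filter bitmap
 * and initialize the flow lists. No default MCAM entries are set up here;
 * ntuple entries are expected to be allocated separately (max_flows starts
 * at 0).
 */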
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
						    BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						    sizeof(long), GFP_KERNEL);
	if (!pfvf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

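/* PF flow init: allocate flow_cfg, the DMAC filter bitmap and the unicast
 * MAC table, set up the default MCAM entries via otx2_mcam_entry_init(),
 * and query the CGX/RPM block for its DMAC filter count before
 * advertising DMAC filter support.
 */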
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
						  BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						  sizeof(long), GFP_KERNEL);
	if (!pf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
				     * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u32) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an mcam entry;
 * on failure enable promiscuous mode.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

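/* Install a unicast MAC filter; warns when CGX/RPM DMAC filters are in
 * use, since that hardware filter list is maintained separately and must
 * be updated as well.
 */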
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

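/* Look up the MCAM entry installed for @mac. Note this also clears the
 * mac_table slot's inuse flag, so it is effectively a find-and-release
 * helper used on the delete path.
 */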
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

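/* Insert @flow into the flow list, keeping the list sorted by ascending
 * rule location so that iteration order matches ethtool rule indices.
 */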
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

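/* Report the number of rule locations exposed to ethtool. The extra
 * dmacflt_max_flows slots (used for CGX/RPM DMAC filter rules) are only
 * advertised once the regular MCAM slots are exhausted or a DMAC filter
 * is already installed.
 */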
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

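/* Translate the IPv4 portion of an ethtool flow spec into an NPC install
 * request: copy header/mask pairs for any field with a non-zero mask and
 * set the matching NPC_* feature bit so the AF knows which extracted
 * fields to match on.
 */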
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

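/* IPv6 counterpart of otx2_prepare_ipv4_flow(): copy any IPv6 fields with
 * a non-zero mask into the NPC install request and set the corresponding
 * NPC_* feature bits.
 */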
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
			pkt->next_header = ipv6_usr_hdr->l4_proto;
			pmask->next_header = ipv6_usr_mask->l4_proto;
			req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

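/* Convert a complete ethtool flow spec into an NPC install request:
 * handle ETHER_FLOW directly, dispatch IPv4/IPv6 flow types to the
 * helpers above, then apply the FLOW_EXT (VLAN TCI/etype, IPv4 fragment
 * flag) and FLOW_MAC_EXT (destination MAC) extensions. Fails with
 * -EOPNOTSUPP if no matchable field was selected.
 */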
static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
				     struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);

			/* A drop rule with vlan_etype == 802.1Q
			 * and vlan_id == 0 is not supported
			 */
			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
				return -EINVAL;

			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		if (fsp->m_ext.data[1]) {
			if (flow_type == IP_USER_FLOW) {
				if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
					return -EINVAL;

				pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
				pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (fsp->h_ext.data[1] ==
					cpu_to_be32(OTX2_DEFAULT_ACTION)) {
				/* Not Drop/Direct to queue but use action
				 * in default entry
				 */
				req->op = NIX_RX_ACTION_DEFAULT;
			}
		}
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

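/* Decide whether an ethtool rule should be programmed as a CGX/RPM DMAC
 * filter rather than an NPC MCAM rule: an ETHER_FLOW rule whose
 * destination MAC is a valid unicast address, with no DMAC mask set, and
 * whose action is neither DROP nor a VF redirect qualifies.
 */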
static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for white listing;
	 * check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

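/* Build and send the NPC install request for one ntuple rule: resolve the
 * ring_cookie into a drop, RSS or unicast-queue action (optionally
 * steered to a VF), and, when DCB is enabled, detect VLAN-priority rules
 * that need PFC backpressure configured on the destination receive queue.
 */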
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

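/* When the first user DMAC filter is installed, also install a companion
 * DMAC filter for the PF's own MAC address in slot 0 of the CGX/RPM
 * filter table, so that traffic to the interface's own MAC continues to
 * be accepted while the block is in white-list mode.
 */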
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

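/* ethtool ntuple rule insertion handler: validate the requested location
 * and ring, then either update an existing rule or allocate a new one.
 * Pure DMAC-match rules are diverted to the CGX/RPM DMAC filter table;
 * all other rules are installed as NPC MCAM entries via
 * otx2_add_flow_msg().
 *
 * A typical rule insert from userspace (illustrative ethtool invocation,
 * not part of this driver) looks like:
 *
 *   ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 5
 */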
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

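/* Update or remove the PF's own DMAC filter rule (always at bitmap bit 0).
 * On DMAC_ADDR_DEL the rule is removed from hardware and from the flow
 * list; on DMAC_ADDR_UPDATE the filter is reprogrammed with the current
 * netdev MAC address.
 */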
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);

				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

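/* ethtool rule delete handler: tear down the rule at @location, routing
 * DMAC filter rules to the CGX/RPM filter table (dropping the PF's
 * companion filter when it is the last one left) and everything else to
 * an NPC delete request.
 */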
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

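/* Delete every installed ntuple rule with a single ranged NPC delete
 * request covering flow_ent[0]..flow_ent[max_flows - 1], then free the
 * corresponding software state.
 */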
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

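/* Full teardown: delete all installed flows, free the software rule list
 * and hand every allocated MCAM entry back to the AF in one free-all
 * request.
 */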
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

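/* Install the MCAM rule that matches VLAN-tagged traffic to the PF's MAC
 * and marks it for VTAG0 capture/strip, using the default entry reserved
 * at rx_vlan_offset. This is the classifier half of Rx VLAN offload; the
 * strip configuration itself is done in otx2_enable_rxvlan() below.
 */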
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

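/* Enable or disable Rx VLAN (CTAG) strip offload: install or delete the
 * VLAN classifier rule above, then configure the NIX block's VTAG0 strip
 * and capture settings to match via a nix_vtag_cfg mailbox request.
 */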
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

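/* Reprogram every software-tracked DMAC filter rule into the CGX/RPM
 * block; used to restore filters when the hardware table needs to be
 * repopulated.
 */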
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}